repo_name stringlengths 9 55 | path stringlengths 7 120 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 169k | license stringclasses 12 values |
|---|---|---|---|---|---|
joshbohde/scikit-learn | examples/plot_permutation_test_for_classification.py | 2 | 2049 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, one technique
consists in repeating the classification procedure after randomizing
(permuting) the labels. The p-value is then given by the percentage of runs
for which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD
# Display the module docstring when the example runs (Python 2 print statement).
print __doc__
import numpy as np
import pylab as pl
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
from sklearn.metrics import zero_one_score
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy features, uncorrelated with the labels
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add the noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
# Score the classifier on the true labels and on 100 permutations of them;
# the returned p-value is the fraction of permuted runs that score at least
# as well as the unpermuted run.
score, permutation_scores, pvalue = permutation_test_score(svm, X, y,
                                                           zero_one_score, cv=cv,
                                                           n_permutations=100, n_jobs=1)
print "Classification score %s (pvalue : %s)" % (score, pvalue)
###############################################################################
# View histogram of permutation scores
pl.hist(permutation_scores, label='Permutation scores')
ylim = pl.ylim()
# Mark the true-label score on top of the permutation-score histogram...
pl.vlines(score, ylim[0], ylim[1], linestyle='--',
          color='g', linewidth=3, label='Classification Score'
          ' (pvalue %s)' % pvalue)
# ...and the chance level, 1 / n_classes for a balanced problem.
pl.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
          color='k', linewidth=3, label='Luck')
pl.ylim(ylim)
pl.legend()
pl.xlabel('Score')
pl.show()
| bsd-3-clause |
CG-F16-24-Rutgers/steersuite-rutgers | steerstats/tools/plotting/plotMultiObjectiveData.py | 8 | 1340 |
import csv
import matplotlib.pyplot as plt
import sys
import numpy as np
# filename = '../../data/optimization/sf/multiObjective/SteerStatsOpt2.csv'
# Usage:
#   python plotMultiObjectiveData.py <csv_file>       -- plot one CSV file
#   python plotMultiObjectiveData.py <prefix> <n>     -- plot <prefix>1.log ...
filename = sys.argv[1]
xs = []
ys = []
if len(sys.argv) == 2:
    # Single CSV file: one "x,y" pair per row.
    csvfile = open(filename, 'r')
    spamreader = csv.reader(csvfile, delimiter=',')
    xs = []
    ys = []
    for row in spamreader:
        xs.append(float(row[0]))
        ys.append(float(row[1]))
elif len(sys.argv) == 3:
    # Series of log files named <prefix><i>.log.
    # NOTE(review): range(1, n) stops at n-1, so file number n is never
    # read -- confirm whether that is intended.
    for i in range(1, int(sys.argv[2])):
        tmp_filename = filename + str(i) + ".log"
        csvfile = open(tmp_filename, 'r')
        spamreader = csv.reader(csvfile, delimiter=',')
        for row in spamreader:
            xs.append(float(row[0]))
            ys.append(float(row[1]))
else:
    # Bad argument count (Python 2 print statement).
    print "Wrong"
    sys.exit()
print "xs = " + str(xs)
print "ys = " + str(ys)
# Min-max normalise both objectives to [0, 1] for plotting.
x_min = np.amin(xs)
x_max = np.amax(xs)
y_min = np.amin(ys)
y_max = np.amax(ys)
# xs/ys are plain lists; subtracting the numpy scalar promotes them to arrays.
new_xs = (xs - x_min) / (x_max - x_min)
new_ys = (ys - y_min) / (y_max - y_min)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(new_xs, new_ys, c="b")
# ax.set_xlim([np.amin(xs), np.amax(xs)])
# ax.set_ylim([np.amin(ys), np.amax(ys)])
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
ax.set_xlabel('efficency metric')
ax.set_ylabel('PLE metric')
ax.set_title("multi-objective optimization")
# plt.axis("tight")
plt.show()
| gpl-3.0 |
maxlikely/scikit-learn | sklearn/pipeline.py | 1 | 13051 | """
The :mod:`sklearn.pipeline` module implements utilites to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Licence: BSD
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
__all__ = ['Pipeline', 'FeatureUnion']
# One round of beers on me if someone finds out why the backslash
# is needed in the Attributes section so as not to upset sphinx.
class Pipeline(BaseEstimator):
    """Pipeline of transforms with a final estimator.

    Sequentially apply a list of transforms and a final estimator.
    Intermediate steps of the pipeline must be 'transforms', that is, they
    must implement fit and transform methods.
    The final estimator only needs to implement fit.

    The purpose of the pipeline is to assemble several steps that can be
    cross-validated together while setting different parameters.
    For this, it enables setting parameters of the various steps using their
    names and the parameter name separated by a '__', as in the example below.

    Parameters
    ----------
    steps: list
        List of (name, transform) tuples (implementing fit/transform) that are
        chained, in the order in which they are chained, with the last object
        an estimator.

    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.datasets import samples_generator
    >>> from sklearn.feature_selection import SelectKBest
    >>> from sklearn.feature_selection import f_regression
    >>> from sklearn.pipeline import Pipeline
    >>> # generate some data to play with
    >>> X, y = samples_generator.make_classification(
    ...     n_informative=5, n_redundant=0, random_state=42)
    >>> # ANOVA SVM-C
    >>> anova_filter = SelectKBest(f_regression, k=5)
    >>> clf = svm.SVC(kernel='linear')
    >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
    >>> # You can set the parameters using the names issued
    >>> # For instance, fit using a k of 10 in the SelectKBest
    >>> # and a parameter 'C' of the svn
    >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
    ... # doctest: +ELLIPSIS
    Pipeline(steps=[...])
    >>> prediction = anova_svm.predict(X)
    >>> anova_svm.score(X, y)
    0.75
    """

    # BaseEstimator interface

    def __init__(self, steps):
        self.named_steps = dict(steps)
        names, estimators = zip(*steps)
        if len(self.named_steps) != len(steps):
            # BUG fix: ``"%s" % names`` with a multi-element tuple raises a
            # TypeError instead of this ValueError; wrap in a 1-tuple.
            raise ValueError("Names provided are not unique: %s" % (names,))
        # BUG fix: materialize as a list. Under Python 3 ``zip`` returns a
        # one-shot iterator, so ``self.steps`` would be silently exhausted
        # after its first traversal. list() also gives the intended shallow
        # copy of ``steps``.
        self.steps = list(zip(names, estimators))
        transforms = estimators[:-1]
        estimator = estimators[-1]
        for t in transforms:
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform")):
                raise TypeError("All intermediate steps of the chain should "
                                "be transforms and implement fit and "
                                "transform. '%s' (type %s) doesn't"
                                % (t, type(t)))
        if not hasattr(estimator, "fit"):
            raise TypeError("Last step of chain should implement fit. "
                            "'%s' (type %s) doesn't"
                            % (estimator, type(estimator)))

    def get_params(self, deep=True):
        """Return the pipeline's parameters.

        With ``deep=True`` the parameters of every step are included as
        well, keyed '<step_name>__<param_name>'.
        """
        if not deep:
            return super(Pipeline, self).get_params(deep=False)
        else:
            out = self.named_steps.copy()
            for name, step in six.iteritems(self.named_steps):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out

    # Estimator interface

    def _pre_transform(self, X, y=None, **fit_params):
        """Fit+transform X through all but the last step.

        Returns the transformed X and the fit parameters addressed to the
        final estimator (the '<last_step>__<param>' entries of fit_params).
        """
        # Route '<step>__<param>' keyword arguments to their steps.
        fit_params_steps = dict((step, {}) for step, _ in self.steps)
        for pname, pval in six.iteritems(fit_params):
            step, param = pname.split('__', 1)
            fit_params_steps[step][param] = pval
        Xt = X
        for name, transform in self.steps[:-1]:
            if hasattr(transform, "fit_transform"):
                Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
            else:
                Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
                              .transform(Xt)
        return Xt, fit_params_steps[self.steps[-1][0]]

    def fit(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then fit the transformed data using the final estimator.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        self.steps[-1][-1].fit(Xt, y, **fit_params)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then use fit_transform on transformed data using the final
        estimator."""
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        if hasattr(self.steps[-1][-1], 'fit_transform'):
            return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
        else:
            return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)

    def predict(self, X):
        """Applies transforms to the data, and the predict method of the
        final estimator. Valid only if the final estimator implements
        predict."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict(Xt)

    def predict_proba(self, X):
        """Applies transforms to the data, and the predict_proba method of the
        final estimator. Valid only if the final estimator implements
        predict_proba."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_proba(Xt)

    def decision_function(self, X):
        """Applies transforms to the data, and the decision_function method of
        the final estimator. Valid only if the final estimator implements
        decision_function."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].decision_function(Xt)

    def predict_log_proba(self, X):
        """Applies transforms to the data, and the predict_log_proba method
        of the final estimator. Valid only if the final estimator implements
        predict_log_proba."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_log_proba(Xt)

    def transform(self, X):
        """Applies transforms to the data, and the transform method of the
        final estimator. Valid only if the final estimator implements
        transform."""
        Xt = X
        for name, transform in self.steps:
            Xt = transform.transform(Xt)
        return Xt

    def inverse_transform(self, X):
        """Applies inverse_transform of every step in reverse order.

        Valid only if every step (including the final estimator) implements
        inverse_transform. A 1-d X is promoted to a single-sample 2-d array.
        """
        if X.ndim == 1:
            X = X[None, :]
        Xt = X
        for name, step in self.steps[::-1]:
            Xt = step.inverse_transform(Xt)
        return Xt

    def score(self, X, y=None):
        """Applies transforms to the data, and the score method of the
        final estimator. Valid only if the final estimator implements
        score."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].score(Xt, y)

    @property
    def _pairwise(self):
        # check if first estimator expects pairwise input
        return getattr(self.steps[0][1], '_pairwise', False)
def _fit_one_transformer(transformer, X, y):
transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
if hasattr(transformer, 'fit_transform'):
return (transformer.fit_transform(X, y, **fit_params)
* transformer_weights[name])
else:
return (transformer.fit(X, y, **fit_params).transform(X)
* transformer_weights[name])
if hasattr(transformer, 'fit_transform'):
return transformer.fit_transform(X, y, **fit_params)
else:
return transformer.fit(X, y, **fit_params).transform(X)
class FeatureUnion(BaseEstimator, TransformerMixin):
    """Concatenates results of multiple transformer objects.

    This estimator applies a list of transformer objects in parallel to the
    input data, then concatenates the results. This is useful to combine
    several feature extraction mechanisms into a single transformer.

    Parameters
    ----------
    transformer_list: list of (name, transformer) tuples
        List of transformer objects to be applied to the data.

    n_jobs: int, optional
        Number of jobs to run in parallel (default 1).

    transformer_weights: dict, optional
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
    """

    def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
        self.transformer_list = transformer_list
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights

    def get_feature_names(self):
        """Get feature names from all transformers.

        Returns
        -------
        feature_names : list of strings
            Names of the features produced by transform, each prefixed
            with its transformer's name ('<name>__<feature>').
        """
        feature_names = []
        for name, trans in self.transformer_list:
            if not hasattr(trans, 'get_feature_names'):
                raise AttributeError("Transformer %s does not provide"
                                     " get_feature_names." % str(name))
            feature_names.extend([name + "__" + f for f in
                                  trans.get_feature_names()])
        return feature_names

    def fit(self, X, y=None):
        """Fit all transformers using X.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data, used to fit transformers.
        """
        Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_one_transformer)(trans, X, y)
            for name, trans in self.transformer_list)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit all transformers using X, transform the data and concatenate
        results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.

        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_transform_one)(trans, name, X, y,
                                        self.transformer_weights, **fit_params)
            for name, trans in self.transformer_list)
        # Any sparse result forces a sparse concatenation.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs

    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.

        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_transform_one)(trans, name, X, self.transformer_weights)
            for name, trans in self.transformer_list)
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs

    def get_params(self, deep=True):
        """Return parameters; with ``deep=True`` include each transformer's
        parameters keyed '<name>__<param>'."""
        if not deep:
            return super(FeatureUnion, self).get_params(deep=False)
        else:
            out = dict(self.transformer_list)
            for name, trans in self.transformer_list:
                # BUG fix: was ``trans.get_params(deep=True).iteritems()``,
                # which only exists on Python 2 dicts; use six.iteritems
                # for py2/py3 compatibility, matching Pipeline.get_params.
                for key, value in six.iteritems(trans.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out
| bsd-3-clause |
martinggww/lucasenlights | MachineLearning/python_tutorial/KNearestNeighborhood.py | 1 | 1274 | '''
Classification algorithm
Create a model that seperate a dataset
proximity probability nearest neighbors
What the hack is K?
if K=2, find the closet 2 points
We want K = odd numbers, K=3, 5, 7...
'''
'''
- - +, 66.7% confidence, confidence, accuracy
Euclid distance, euclid distance middle point
Dataset and the relavent data features
class and label
id, clump_thickness, unif_cell_size, unif_cell_shape
'''
import numpy as np
from sklearn import preprocessing, cross_validation, neighbors
import pandas as pd
# Breast-cancer dataset: one row per sample; the 'class' column is the label.
df = pd.read_csv('./dataset/breastcancer.csv')
print df.head(2)
# Replace missing values ('?') with a large sentinel;
# most algorithms will treat it as an outlier
df.replace('?',-99999, inplace=True)
# drop useless column: the patient id carries no predictive information
df.drop(['id'], 1, inplace=True)
# Convert features X (everything but 'class') and labels y to np.array
X = np.array(df.drop(['class'], 1))
y = np.array(df['class'] )
# Hold out 20% of the samples for testing.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
# K is left at scikit-learn's default (n_neighbors=5).
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print accuracy
# it's also a ndarray: three identical hand-crafted samples to classify
example_measures = np.array([[4,2,1,1,1,2,3,2,1],[4,2,1,1,1,2,3,2,1],[4,2,1,1,1,2,3,2,1]])
example_measures = example_measures.reshape(len(example_measures), -1)
score = clf.predict(example_measures)
print score
| cc0-1.0 |
treycausey/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
# Univariate scoring functions.
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
# Univariate selectors built on those scores.
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
# Recursive feature elimination.
from .rfe import RFE
from .rfe import RFECV
# Public API of this subpackage.
__all__ = ['GenericUnivariateSelect',
           'RFE',
           'RFECV',
           'SelectFdr',
           'SelectFpr',
           'SelectFwe',
           'SelectKBest',
           'SelectPercentile',
           'VarianceThreshold',
           'chi2',
           'f_classif',
           'f_oneway',
           'f_regression']
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/cluster/plot_dict_face_patches.py | 9 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
# #############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
# NOTE(review): this 'index = 1' is dead -- it is overwritten by
# 'index = 0' just below; confirm and remove upstream.
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
    for img in faces.images:
        # Sample 50 random 20x20 patches from each face image.
        data = extract_patches_2d(img, patch_size, max_patches=50,
                                  random_state=rng)
        data = np.reshape(data, (len(data), -1))
        buffer.append(data)
        index += 1
        if index % 10 == 0:
            # Every 10 images (500 patches): normalise the accumulated chunk
            # and feed it to the online KMeans via partial_fit.
            data = np.concatenate(buffer, axis=0)
            data -= np.mean(data, axis=0)
            data /= np.std(data, axis=0)
            kmeans.partial_fit(data)
            buffer = []
        if index % 100 == 0:
            print('Partial fit of %4i out of %i'
                  % (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
# #############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
    plt.subplot(9, 9, i + 1)
    plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
# NOTE(review): the progress message above uses 6 * len(faces.images) while
# this title uses 8 * -- one of the factors looks stale; confirm upstream.
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| gpl-3.0 |
jrcapriles/gameSimulator | gameSimulator.py | 1 | 6837 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 18 19:27:57 2014
@author: joser
"""
import pygame, ode, random, Buttons
from math import atan2, acos, asin, sin, cos
import matplotlib.pyplot as plt
from pygame.locals import *
from numpy import *
from Point import *
from Buttons import *
class gameSimulator( object ):
    """Pygame + ODE projectile simulator with an on-screen shot trace.

    Keys (see checkEvents): UP/DOWN change the launch angle, LEFT/RIGHT the
    power, 'f' fires; the bullet follows explicit ballistic equations under
    the configured gravity.
    """
    def __init__( self, *args, **kwargs):
        # Initialize pygame
        pygame.init()
        # Window / simulation configuration, all overridable via kwargs.
        self.width = kwargs.get('width',600)
        self.height = kwargs.get('height',400)
        self.length = kwargs.get('length',200)
        self.fps = kwargs.get('fps',50)
        self.G = kwargs.get('gravity',-9.81)
        # ODE world, used only for time stepping here.
        self.world = ode.World()
        self.world.setGravity((0,self.G,0))
        self.createScreen()
        self.createButtons()
        #Variables of this game
        self.FIRE = False        # True while a bullet is in flight
        self.ANGLE = 0.25        # launch angle (radians)
        self.POWER = 6.5         # launch speed magnitude
        self.dt = 1.0/self.fps
        # Circular buffer of recent blit positions (used to draw the trace).
        self.buffer = []
        self.bufferHead = 0
        self.bufferTail = 0
        self.bufferIsEmpty = True
        self.bufferSize = 100
        # Half-size of the bullet sprite; offsets trace pixels to its centre.
        self.correctX = 0
        self.correctY = 0
        self.initBuffer()

    def initBuffer(self):
        # Pre-fill the circular buffer with (0, 0) placeholders.
        for i in range(0,self.bufferSize):
            self.buffer.append((0,0))

    def createScreen(self):
        # Open a display
        self.srf = pygame.display.set_mode((self.width,self.height))
        pygame.display.set_caption("Game Simulator")
        #Parameters
        self.dt = 1.0/self.fps
        self.loopFlag = True

    def createButtons(self):
        #Buttons
        self.goal_button = Buttons.Button(self.srf, color = (200,0,0), x = 10, y = 10, length = 50, height = 25, width = 0, text = "Button_1", text_color = (255,255,255), font_size = 20, fade_on = False)
        self.switch_button = Buttons.Button(self.srf, color = (200,0,0), x = 60, y = 10, length = 50, height = 25, width = 0, text = "Button_2", text_color = (255,255,255), font_size = 20, fade_on = False)
        self.follow_button = Buttons.Button(self.srf, color = (200,0,0), x = 110, y = 10, length = 50, height = 25, width = 0, text = "Button_3", text_color = (255,255,255), font_size = 20, fade_on = False)
        self.noise_button = Buttons.Button(self.srf, color = (200,0,0), x = 160, y = 10, length = 50, height = 25, width = 0, text = "Button_4", text_color = (255,255,255), font_size = 20, fade_on = False)
        #Button Dictionary
        self.buttons = {0 : self.goal_button,
                        1 : self.switch_button,
                        2 : self.follow_button,
                        3 : self.noise_button}

    def loadBackground(self,filename):
        # Load the background image once; keep its rect for blitting.
        self.backgroundImage = pygame.image.load(filename).convert()
        self.backgroundRect = self.backgroundImage.get_rect()

    def loadImage(self, filename):
        # Thin wrapper around pygame.image.load.
        image = pygame.image.load(filename)
        return image

    def world2screen(self, x, y):
        # World metres -> screen pixels: 128 px per metre, origin at the
        # centre of the (width x length) area, y axis flipped.
        return int(self.width/2 + 128*x), int(self.length/2 - 128*y)

    def screen2world(self, x, y):
        # Inverse of world2screen.
        return (float(x - self.width/2)/128), (float(-y + self.length/2)/128)

    def checkEvents(self):
        """Drain the pygame event queue: fire, tune angle/power, or quit."""
        events = pygame.event.get()
        for e in events:
            if e.type==QUIT:
                pygame.quit()
            elif e.type==KEYDOWN:
                if e.key == K_f:
                    # Fire: split POWER into velocity components.
                    print "FIRE!!!"
                    self.FIRE = True
                    self.Vox = self.POWER * cos(self.ANGLE)
                    self.Voy = self.POWER * sin(self.ANGLE)
                elif e.key == K_UP:
                    self.ANGLE = self.ANGLE + 0.1
                    print self.POWER, self.ANGLE
                elif e.key == K_DOWN:
                    self.ANGLE = self.ANGLE - 0.1
                    print self.POWER, self.ANGLE
                elif e.key == K_LEFT:
                    self.POWER = self.POWER - 0.1
                    print self.POWER, self.ANGLE
                elif e.key == K_RIGHT:
                    self.POWER = self.POWER + 0.1
                    print self.POWER, self.ANGLE
                else:
                    # NOTE(review): any other key quits the game -- confirm
                    # this is intended and not a fallthrough bug.
                    pygame.quit()

    def updateBackground(self, color = None):
        # Repaint either a solid colour or the loaded background image.
        if color is not None:
            self.srf.fill(color)
        else:
            self.srf.blit(self.backgroundImage,self.backgroundRect)

    def updateImage(self,image, position):
        # Blit a sprite and record its position in the trace buffer.
        self.srf.blit(image,position)
        self.addBuffer(position)

    def getBuffer(self):
        # Return the buffered positions as two parallel sequences (xs, ys).
        return zip(*self.buffer)

    def addBuffer(self,newValue):
        """Insert a value into the fixed-size circular trace buffer."""
        self.buffer[self.bufferHead] = newValue
        if self.bufferHead == self.bufferSize-1:
            # Wrap around at the end of the buffer.
            self.bufferHead = 0
            self.bufferTail = 0
        else:
            if self.bufferHead == self.bufferTail and not self.bufferIsEmpty:
                # Head caught up with tail: advance both (oldest overwritten).
                self.bufferHead = self.bufferHead +1
                self.bufferTail = self.bufferHead
            else:
                self.bufferHead = self.bufferHead +1
        self.bufferIsEmpty = False

    def updateTrace(self,x,y,color):
        # Draw one pixel per buffered position, offset to the sprite centre.
        for i in range(0,self.bufferSize):
            self.srf.set_at((x[i]+self.correctX,y[i]+self.correctY),color)

    def run(self):
        """Main loop: events, ballistic integration, drawing, world step."""
        # Simulation loop.
        self.clk = pygame.time.Clock()
        self.loadBackground("images/screen.png")
        gun = self.loadImage("images/gun.jpg")
        gunPos = [50,320]
        bullet = self.loadImage("images/bullet.jpg")
        # Half the bullet sprite size centres the trace pixels on it.
        self.correctX, self.correctY = bullet.get_rect().size
        self.correctX = self.correctX/2
        self.correctY = self.correctY/2
        x,y = self.screen2world(230,320)
        while True:
            # Check for events
            self.checkEvents()
            self.updateBackground()
            self.updateImage(gun,gunPos)
            if self.FIRE:
                # Explicit Euler step of the ballistic trajectory.
                self.Voy = self.Voy + self.G* self.dt
                x = x + self.Vox*self.dt
                y = y + self.Voy*self.dt + 0.5*self.G*self.dt**2
                self.updateImage(bullet, self.world2screen(x,y))
            else:
                if self.bufferIsEmpty is False:
                    # Not firing: show the trace of the last shot in white.
                    plotx, ploty = self.getBuffer()
                    self.updateTrace(plotx, ploty,(255,255,255))
            if self.FIRE and (y < -2.5 or y >2.5 or x >2.5 or x<-2.5):
                # Bullet left the +-2.5 m world box: reset for the next shot.
                self.FIRE = False
                x,y = self.screen2world(230,320)
                plotx, ploty = self.getBuffer()
            pygame.display.flip()
            # Next simulation step
            self.world.step(self.dt)
            # Try to keep the specified framerate
            self.clk.tick(self.fps)
| mit |
stevenzhang18/Indeed-Flask | lib/pandas/tests/test_expressions.py | 9 | 16557 | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
import nose
import re
from numpy.random import randn
import operator
import numpy as np
from pandas.core.api import DataFrame, Panel
from pandas.computation import expressions as expr
from pandas import compat
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal,
assert_panel4d_equal)
import pandas.util.testing as tm
from numpy.testing.decorators import slow
# Skip the whole module when numexpr is unavailable or disabled.
if not expr._USE_NUMEXPR:
    try:
        import numexpr
    except ImportError:
        msg = "don't have"
    else:
        msg = "not using"
    raise nose.SkipTest("{0} numexpr".format(msg))
# Shared fixtures. The large frames (_frame, _integer) and the small '2'
# variants presumably sit on either side of the numexpr size threshold
# (see test_invalid's "min elements" case -- confirm); _mixed* hold one
# column per dtype (float64/float32/int64/int32).
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns = list('ABCD'), dtype='float64')
_mixed = DataFrame({ 'A' : _frame['A'].copy(), 'B' : _frame['B'].astype('float32'), 'C' : _frame['C'].astype('int64'), 'D' : _frame['D'].astype('int32') })
_mixed2 = DataFrame({ 'A' : _frame2['A'].copy(), 'B' : _frame2['B'].astype('float32'), 'C' : _frame2['C'].astype('int64'), 'D' : _frame2['D'].astype('int32') })
_integer = DataFrame(np.random.randint(1, 100, size=(10001, 4)), columns = list('ABCD'), dtype='int64')
_integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)),
                      columns=list('ABCD'), dtype='int64')
# Panel versions of the same fixtures.
_frame_panel = Panel(dict(ItemA=_frame.copy(), ItemB=(_frame.copy() + 3), ItemC=_frame.copy(), ItemD=_frame.copy()))
_frame2_panel = Panel(dict(ItemA=_frame2.copy(), ItemB=(_frame2.copy() + 3),
                           ItemC=_frame2.copy(), ItemD=_frame2.copy()))
_integer_panel = Panel(dict(ItemA=_integer,
                            ItemB=(_integer + 34).astype('int64')))
_integer2_panel = Panel(dict(ItemA=_integer2,
                             ItemB=(_integer2 + 34).astype('int64')))
_mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3)))
_mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))
class TestExpressions(tm.TestCase):
_multiprocess_can_split_ = False
    def setUp(self):
        # Work on fresh copies so individual tests can mutate freely.
        self.frame = _frame.copy()
        self.frame2 = _frame2.copy()
        self.mixed = _mixed.copy()
        self.mixed2 = _mixed2.copy()
        self.integer = _integer.copy()
        # Remember the module-global threshold so tearDown can restore it.
        self._MIN_ELEMENTS = expr._MIN_ELEMENTS

    def tearDown(self):
        # Restore the numexpr size threshold changed by the run_* helpers.
        expr._MIN_ELEMENTS = self._MIN_ELEMENTS
@nose.tools.nottest
def run_arithmetic_test(self, df, other, assert_func, check_dtype=False,
test_flex=True):
expr._MIN_ELEMENTS = 0
operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow']
if not compat.PY3:
operations.append('div')
for arith in operations:
operator_name = arith
if arith == 'div':
operator_name = 'truediv'
if test_flex:
op = lambda x, y: getattr(df, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, operator_name)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
result = op(df, other)
try:
if check_dtype:
if arith == 'truediv':
assert expected.dtype.kind == 'f'
assert_func(expected, result)
except Exception:
com.pprint_thing("Failed test with operator %r" % op.__name__)
raise
    def test_integer_arithmetic(self):
        # int64 frame and series must match the non-numexpr results;
        # check_dtype additionally requires truediv to upcast to float.
        self.run_arithmetic_test(self.integer, self.integer,
                                 assert_frame_equal)
        self.run_arithmetic_test(self.integer.iloc[:,0], self.integer.iloc[:, 0],
                                 assert_series_equal, check_dtype=True)
@nose.tools.nottest
def run_binary_test(self, df, other, assert_func,
test_flex=False, numexpr_ops=set(['gt', 'lt', 'ge',
'le', 'eq', 'ne'])):
"""
tests solely that the result is the same whether or not numexpr is
enabled. Need to test whether the function does the correct thing
elsewhere.
"""
expr._MIN_ELEMENTS = 0
expr.set_test_mode(True)
operations = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
for arith in operations:
if test_flex:
op = lambda x, y: getattr(df, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, arith)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
expr.get_test_result()
result = op(df, other)
used_numexpr = expr.get_test_result()
try:
if arith in numexpr_ops:
assert used_numexpr, "Did not use numexpr as expected."
else:
assert not used_numexpr, "Used numexpr unexpectedly."
assert_func(expected, result)
except Exception:
com.pprint_thing("Failed test with operation %r" % arith)
com.pprint_thing("test_flex was %r" % test_flex)
raise
    def run_frame(self, df, other, binary_comp=None, run_binary=True,
                  **kwargs):
        # Exercise a DataFrame through both the operator-module and the
        # flex (df.add) arithmetic paths, then the comparison ops.
        self.run_arithmetic_test(df, other, assert_frame_equal,
                                 test_flex=False, **kwargs)
        self.run_arithmetic_test(df, other, assert_frame_equal, test_flex=True,
                                 **kwargs)
        if run_binary:
            if binary_comp is None:
                # Build the comparison operand with numexpr disabled so the
                # fixture itself does not go through the code under test.
                expr.set_use_numexpr(False)
                binary_comp = other + 1
                expr.set_use_numexpr(True)
            self.run_binary_test(df, binary_comp, assert_frame_equal,
                                 test_flex=False, **kwargs)
            self.run_binary_test(df, binary_comp, assert_frame_equal,
                                 test_flex=True, **kwargs)

    def run_series(self, ser, other, binary_comp=None, **kwargs):
        # Series: arithmetic only -- the binary comparisons are disabled
        # below (see the original comment about vec_compare).
        self.run_arithmetic_test(ser, other, assert_series_equal,
                                 test_flex=False, **kwargs)
        self.run_arithmetic_test(ser, other, assert_almost_equal,
                                 test_flex=True, **kwargs)
        # series doesn't uses vec_compare instead of numexpr...
        # if binary_comp is None:
        #     binary_comp = other + 1
        # self.run_binary_test(ser, binary_comp, assert_frame_equal, test_flex=False,
        #                      **kwargs)
        # self.run_binary_test(ser, binary_comp, assert_frame_equal, test_flex=True,
        #                      **kwargs)

    def run_panel(self, panel, other, binary_comp=None, run_binary=True,
                  assert_func=assert_panel_equal, **kwargs):
        # Same pattern as run_frame, with a pluggable assert for
        # Panel / Panel4D containers.
        self.run_arithmetic_test(panel, other, assert_func, test_flex=False,
                                 **kwargs)
        self.run_arithmetic_test(panel, other, assert_func, test_flex=True,
                                 **kwargs)
        if run_binary:
            if binary_comp is None:
                binary_comp = other + 1
            self.run_binary_test(panel, binary_comp, assert_func,
                                 test_flex=False, **kwargs)
            self.run_binary_test(panel, binary_comp, assert_func,
                                 test_flex=True, **kwargs)
    # Thin wrappers driving the run_* helpers over each fixture dtype.

    def test_integer_arithmetic_frame(self):
        self.run_frame(self.integer, self.integer)

    def test_integer_arithmetic_series(self):
        self.run_series(self.integer.iloc[:, 0], self.integer.iloc[:, 0])

    @slow
    def test_integer_panel(self):
        self.run_panel(_integer2_panel, np.random.randint(1, 100))

    def test_float_arithemtic_frame(self):
        # (sic: 'arithemtic' -- renaming the method would change test ids)
        self.run_frame(self.frame2, self.frame2)

    def test_float_arithmetic_series(self):
        self.run_series(self.frame2.iloc[:, 0], self.frame2.iloc[:, 0])

    @slow
    def test_float_panel(self):
        self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8)

    @slow
    def test_panel4d(self):
        self.run_panel(tm.makePanel4D(), np.random.randn() + 0.5,
                       assert_func=assert_panel4d_equal, binary_comp=3)

    def test_mixed_arithmetic_frame(self):
        # TODO: FIGURE OUT HOW TO GET IT TO WORK...
        # can't do arithmetic because comparison methods try to do *entire*
        # frame instead of by-column
        self.run_frame(self.mixed2, self.mixed2, run_binary=False)

    def test_mixed_arithmetic_series(self):
        # Per-column, so each dtype is exercised separately.
        for col in self.mixed2.columns:
            self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4)

    @slow
    def test_mixed_panel(self):
        self.run_panel(_mixed2_panel, np.random.randint(1, 100),
                       binary_comp=-2)

    def test_float_arithemtic(self):
        self.run_arithmetic_test(self.frame, self.frame, assert_frame_equal)
        self.run_arithmetic_test(self.frame.iloc[:, 0], self.frame.iloc[:, 0],
                                 assert_series_equal, check_dtype=True)

    def test_mixed_arithmetic(self):
        self.run_arithmetic_test(self.mixed, self.mixed, assert_frame_equal)
        for col in self.mixed.columns:
            self.run_arithmetic_test(self.mixed[col], self.mixed[col],
                                     assert_series_equal)

    def test_integer_with_zeros(self):
        # Sprinkle zeros into the operands to hit division/mod edge cases.
        self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
        self.run_arithmetic_test(self.integer, self.integer, assert_frame_equal)
        self.run_arithmetic_test(self.integer.iloc[:, 0], self.integer.iloc[:, 0],
                                 assert_series_equal)
def test_invalid(self):
    """_can_use_numexpr must reject no-op, mixed-dtype and tiny inputs."""
    # no op
    result = expr._can_use_numexpr(operator.add, None, self.frame,
                                   self.frame, 'evaluate')
    self.assertFalse(result)

    # mixed
    result = expr._can_use_numexpr(operator.add, '+', self.mixed,
                                   self.frame, 'evaluate')
    self.assertFalse(result)

    # min elements
    result = expr._can_use_numexpr(operator.add, '+', self.frame2,
                                   self.frame2, 'evaluate')
    self.assertFalse(result)

    # ok, we only check on first part of expression
    result = expr._can_use_numexpr(operator.add, '+', self.frame,
                                   self.frame2, 'evaluate')
    self.assertTrue(result)
def test_binary_ops(self):
    """numexpr and plain-python evaluation must agree for binary arithmetic."""
    def testit():
        for f, f2 in [(self.frame, self.frame2), (self.mixed, self.mixed2)]:
            for op, op_str in [('add', '+'), ('sub', '-'), ('mul', '*'),
                               ('div', '/'), ('pow', '**')]:
                if op == 'div':
                    op = getattr(operator, 'truediv', None)
                else:
                    op = getattr(operator, op, None)
                if op is not None:
                    # numexpr is usable iff the frame is not mixed-dtype
                    result = expr._can_use_numexpr(op, op_str, f, f, 'evaluate')
                    self.assertNotEqual(result, f._is_mixed_type)

                    result = expr.evaluate(op, op_str, f, f, use_numexpr=True)
                    expected = expr.evaluate(op, op_str, f, f, use_numexpr=False)
                    tm.assert_numpy_array_equal(result, expected.values)

                    # the small frame is below the min-elements threshold
                    result = expr._can_use_numexpr(op, op_str, f2, f2, 'evaluate')
                    self.assertFalse(result)

    # run with numexpr disabled, single-threaded, and default threading
    expr.set_use_numexpr(False)
    testit()
    expr.set_use_numexpr(True)
    expr.set_numexpr_threads(1)
    testit()
    expr.set_numexpr_threads()
    testit()
def test_boolean_ops(self):
    """numexpr and plain-python evaluation must agree for comparisons."""
    def testit():
        for f, f2 in [(self.frame, self.frame2), (self.mixed, self.mixed2)]:
            f11 = f
            f12 = f + 1

            f21 = f2
            f22 = f2 + 1

            for op, op_str in [('gt', '>'), ('lt', '<'), ('ge', '>='),
                               ('le', '<='), ('eq', '=='), ('ne', '!=')]:
                op = getattr(operator, op)

                result = expr._can_use_numexpr(op, op_str, f11, f12, 'evaluate')
                self.assertNotEqual(result, f11._is_mixed_type)

                result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True)
                expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False)
                tm.assert_numpy_array_equal(result, expected.values)

                # the small frame is below the min-elements threshold
                result = expr._can_use_numexpr(op, op_str, f21, f22, 'evaluate')
                self.assertFalse(result)

    # run with numexpr disabled, single-threaded, and default threading
    expr.set_use_numexpr(False)
    testit()
    expr.set_use_numexpr(True)
    expr.set_numexpr_threads(1)
    testit()
    expr.set_numexpr_threads()
    testit()
def test_where(self):
    """expr.where must match np.where for all-True and all-False conditions."""
    def testit():
        for f in [self.frame, self.frame2, self.mixed, self.mixed2]:
            for cond in [True, False]:
                c = np.empty(f.shape, dtype=np.bool_)
                c.fill(cond)
                result = expr.where(c, f.values, f.values + 1)
                expected = np.where(c, f.values, f.values + 1)
                tm.assert_numpy_array_equal(result, expected)

    # run with numexpr disabled, single-threaded, and default threading
    expr.set_use_numexpr(False)
    testit()
    expr.set_use_numexpr(True)
    expr.set_numexpr_threads(1)
    testit()
    expr.set_numexpr_threads()
    testit()
def test_bool_ops_raise_on_arithmetic(self):
    """Division/power on bool frames must raise NotImplementedError."""
    df = DataFrame({'a': np.random.rand(10) > 0.5,
                    'b': np.random.rand(10) > 0.5})
    names = 'div', 'truediv', 'floordiv', 'pow'
    ops = '/', '/', '//', '**'
    msg = 'operator %r not implemented for bool dtypes'
    for op, name in zip(ops, names):
        # operator.div does not exist on Python 3
        if not compat.PY3 or name != 'div':
            f = getattr(operator, name)
            err_msg = re.escape(msg % op)

            with tm.assertRaisesRegexp(NotImplementedError, err_msg):
                f(df, df)

            with tm.assertRaisesRegexp(NotImplementedError, err_msg):
                f(df.a, df.b)

            with tm.assertRaisesRegexp(NotImplementedError, err_msg):
                f(df.a, True)

            with tm.assertRaisesRegexp(NotImplementedError, err_msg):
                f(False, df.a)

            # scalar-frame variants raise TypeError instead
            with tm.assertRaisesRegexp(TypeError, err_msg):
                f(False, df)

            with tm.assertRaisesRegexp(TypeError, err_msg):
                f(df, True)
def test_bool_ops_warn_on_arithmetic(self):
    """+/*/- on bool data warn and fall back to the logical ops |/&/^."""
    n = 10
    df = DataFrame({'a': np.random.rand(n) > 0.5,
                    'b': np.random.rand(n) > 0.5})
    names = 'add', 'mul', 'sub'
    ops = '+', '*', '-'
    subs = {'+': '|', '*': '&', '-': '^'}
    sub_funcs = {'|': 'or_', '&': 'and_', '^': 'xor'}
    for op, name in zip(ops, names):
        f = getattr(operator, name)
        fe = getattr(operator, sub_funcs[subs[op]])

        with tm.use_numexpr(True, min_elements=5):
            with tm.assert_produces_warning(check_stacklevel=False):
                r = f(df, df)
                e = fe(df, df)
                tm.assert_frame_equal(r, e)

            with tm.assert_produces_warning(check_stacklevel=False):
                r = f(df.a, df.b)
                e = fe(df.a, df.b)
                tm.assert_series_equal(r, e)

            with tm.assert_produces_warning(check_stacklevel=False):
                r = f(df.a, True)
                e = fe(df.a, True)
                tm.assert_series_equal(r, e)

            with tm.assert_produces_warning(check_stacklevel=False):
                r = f(False, df.a)
                e = fe(False, df.a)
                tm.assert_series_equal(r, e)

            with tm.assert_produces_warning(check_stacklevel=False):
                r = f(False, df)
                e = fe(False, df)
                tm.assert_frame_equal(r, e)

            with tm.assert_produces_warning(check_stacklevel=False):
                r = f(df, True)
                e = fe(df, True)
                tm.assert_frame_equal(r, e)
if __name__ == '__main__':
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| apache-2.0 |
drusk/pml | pml/unsupervised/clustering.py | 1 | 11112 | # Copyright (C) 2012 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Clustering algorithms for unsupervised learning tasks.
@author: drusk
"""
import itertools
import random
import pandas as pd
from pml.data import model
from pml.utils.errors import UnlabelledDataSetError
from pml.utils.distance_utils import euclidean
from pml.utils.pandas_util import are_dataframes_equal
class ClusteredDataSet(model.DataSet):
    """
    A collection of data which has been analysed by a clustering algorithm.

    It contains both the original DataSet and the results of the clustering,
    and provides methods for analysing these clustering results.
    """

    def __init__(self, dataset, cluster_assignments):
        """
        Creates a new ClusteredDataSet.

        Args:
          dataset: model.DataSet
            A dataset which does not have cluster assignments.
          cluster_assignments: pandas.Series
            A Series with the cluster assignment for each sample in the
            dataset.
        """
        super(ClusteredDataSet, self).__init__(dataset.get_data_frame(),
                                               dataset.get_labels())
        self.cluster_assignments = cluster_assignments

    def get_cluster_assignments(self):
        """
        Retrieves the cluster assignments produced for this dataset by a
        clustering algorithm.

        Returns:
          A pandas Series indexed like the original dataset, holding the
          numerical id of the cluster each sample belongs to.
        """
        return self.cluster_assignments

    def calculate_purity(self):
        """
        Calculate the purity, a measurement of quality for the clustering
        results.

        Each cluster is assigned to the class which is most frequent in the
        cluster.  Using these classes, the percent accuracy is then
        calculated.

        Returns:
          A number between 0 and 1.  Poor clusterings have a purity close
          to 0 while a perfect clustering has a purity of 1.

        Raises:
          UnlabelledDataSetError if the dataset is not labelled.
        """
        if not self.is_labelled():
            raise UnlabelledDataSetError()

        # get the set of unique cluster ids
        clusters = set(self.cluster_assignments.values)

        # find out what class is most frequent in each cluster
        cluster_classes = {}
        for cluster in clusters:
            # get the indices of rows in this cluster
            indices = self.cluster_assignments[self.cluster_assignments ==
                                               cluster].index
            # filter the labels series down to those in this cluster
            cluster_labels = self.labels[indices]
            # assign the most common label to be the label for this cluster
            cluster_classes[cluster] = cluster_labels.value_counts().idxmax()

        def get_label(cluster):
            """Get the label for a sample based on its cluster."""
            return cluster_classes[cluster]

        # label every sample with its cluster's most frequent label, then
        # return the fraction of samples agreeing with the true labels
        labels_by_clustering = self.cluster_assignments.map(get_label)
        num_agreed = sum(1 for ind in labels_by_clustering.index
                         if labels_by_clustering[ind] == self.labels[ind])
        return float(num_agreed) / labels_by_clustering.size

    def calculate_rand_index(self):
        """
        Calculate the Rand index, a measurement of quality for the
        clustering results.  It is essentially the percent accuracy of the
        clustering.

        The clustering is viewed as a series of decisions.  There are
        N*(N-1)/2 pairs of samples in the dataset to be considered.  The
        decision is considered correct if the pairs have the same label and
        are in the same cluster, or have different labels and are in
        different clusters.  The number of correct decisions divided by the
        total number of decisions gives the Rand index, or accuracy.

        Returns:
          The accuracy, a number between 0 and 1.  The closer to 1, the
          better the clustering.

        Raises:
          UnlabelledDataSetError if the dataset is not labelled.
        """
        if not self.is_labelled():
            raise UnlabelledDataSetError()

        correct = 0
        total = 0
        for index_combo in itertools.combinations(self.get_sample_ids(), 2):
            index1 = index_combo[0]
            index2 = index_combo[1]

            same_class = (self.labels[index1] == self.labels[index2])
            same_cluster = (self.cluster_assignments[index1]
                            == self.cluster_assignments[index2])

            if same_class and same_cluster:
                correct += 1
            elif not same_class and not same_cluster:
                correct += 1
            total += 1

        return float(correct) / total
def create_random_centroids(dataset, k):
    """
    Initializes centroids at random positions.

    The random value chosen for each feature will always be limited to the
    range of values found in the dataset.  For example, if a certain feature
    has a minimum value of 0 in the dataset, and maximum value of 9, the
    value chosen will be between 0 and 9.

    Args:
      dataset: DataSet
        The DataSet to create the random centroids for.
      k: int
        The number of centroids to create.

    Returns:
      A list of centroids.  Each centroid is a pandas Series with the same
      labels as the dataset's headers.

    Note: the previous implementation built a single ``zip`` of the
    per-feature (min, max) pairs and mapped over it once per centroid; on
    Python 3 ``zip`` is a one-shot iterator, so every centroid after the
    first would have received no values.  The ranges are now materialized
    once and re-zipped per centroid.
    """
    mins = dataset.reduce_features(min).values
    maxs = dataset.reduce_features(max).values

    return [pd.Series([random.uniform(low, high)
                       for low, high in zip(mins, maxs)],
                      index=dataset.feature_list(),
                      name=i)
            for i in range(k)]
def kmeans(dataset, k=2, distance=euclidean, centroids=None):
    """
    K-means clustering algorithm.

    This algorithm partitions a dataset into k clusters in which each
    observation (sample) belongs to the cluster with the nearest mean.

    Args:
      dataset: model.DataSet
        The DataSet to perform the clustering on.
      k: int
        The number of clusters to partition the dataset into.
      distance: callable
        Distance measure between a sample and a centroid (defaults to
        euclidean).
      centroids: list of pandas Series
        The initial centroids for the clusters.  Defaults to None in which
        case they are selected randomly.

    Returns:
      A ClusteredDataSet which contains the cluster assignments as well as
      the original data.  In the cluster assignments, each sample index is
      assigned a numerical value representing the cluster it is part of.
    """
    # If dataset is not already a model.DataSet object, make it one.
    dataset = model.as_dataset(dataset)

    # Initialize k centroids
    if centroids is None:
        centroids = create_random_centroids(dataset, k)

    # Iteratively compute best clusters until they stabilize
    assignments = None
    clusters_changed = True
    while clusters_changed:
        centroids, new_assignments = _compute_iteration(dataset, centroids,
                                                        distance)
        if are_dataframes_equal(new_assignments, assignments):
            clusters_changed = False
        assignments = new_assignments

    return ClusteredDataSet(dataset, assignments)
def _get_distances_to_centroids(dataset, centroids, distance_measure):
    """
    Calculates the distance from each data point to each centroid.

    Args:
      dataset: model.DataSet
        The DataSet whose samples are being measured.
      centroids: list of pandas Series
        The centroids to compare each data point with.
      distance_measure: callable
        Distance function taking (sample, centroid).

    Returns:
      A pandas DataFrame with a row for each sample in dataset and a column
      for the distance to each centroid.
    """
    distances = {}
    for i, centroid in enumerate(centroids):
        # the closure is consumed immediately by reduce_rows, so the
        # loop-variable binding of ``centroid`` is safe here
        def calc_distance(sample):
            return distance_measure(sample, centroid)

        distances[i] = dataset.reduce_rows(calc_distance)

    # each dictionary entry is interpreted as a column
    return pd.DataFrame(distances)
def _compute_iteration(dataset, centroids, distance_measure):
    """
    Computes an iteration of the k-means algorithm.

    Args:
      dataset: model.DataSet
        The dataset being clustered.
      centroids: list of pandas Series
        The current centroids at the start of the iteration.
      distance_measure: callable
        Distance function taking (sample, centroid).

    Returns:
      new_centroids: list of pandas Series
        The updated centroids.
      cluster_assignments: pandas Series
        The current cluster assignments for each sample.
    """
    # Calculate the distance from each data point to each centroid
    distances = _get_distances_to_centroids(dataset, centroids,
                                            distance_measure)

    # Find each datapoint's nearest centroid
    cluster_assignments = distances.idxmin(axis=1)

    def nearest_centroid(sample_index):
        return cluster_assignments[sample_index]

    # Calculate mean position of datapoints in each centroid's cluster
    new_centroids = dataset.get_data_frame().groupby(nearest_centroid).mean()

    # Turn each row of the centroid frame into a Series; .loc replaces the
    # deprecated .ix for label-based row access.
    list_of_series = [new_centroids.loc[ind] for ind in new_centroids.index]
    return list_of_series, cluster_assignments
| mit |
r39132/airflow | tests/contrib/operators/test_hive_to_dynamodb_operator.py | 7 | 5053 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
import datetime
import mock
import pandas as pd
from airflow import configuration, DAG
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
import airflow.contrib.operators.hive_to_dynamodb
configuration.load_test_config()
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
class HiveToDynamoDBTransferOperatorTest(unittest.TestCase):
    """Tests for HiveToDynamoDBTransferOperator against moto-mocked DynamoDB."""

    def setUp(self):
        configuration.load_test_config()
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        dag = DAG('test_dag_id', default_args=args)
        self.dag = dag
        self.sql = 'SELECT 1'
        self.hook = AwsDynamoDBHook(
            aws_conn_id='aws_default', region_name='us-east-1')

    @staticmethod
    def process_data(data, *args, **kwargs):
        # Convert the hive-result DataFrame into a list of JSON records.
        return json.loads(data.to_json(orient='records'))

    def _create_test_table(self):
        """Create the 'test_airflow' table (shared fixture for both transfer
        tests; this table needs to be created in production)."""
        self.hook.get_conn().create_table(
            TableName='test_airflow',
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'name',
                    'AttributeType': 'S'
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            }
        )

    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_get_conn_returns_a_boto3_connection(self):
        hook = AwsDynamoDBHook(aws_conn_id='aws_default')
        self.assertIsNotNone(hook.get_conn())

    @mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
                return_value=pd.DataFrame(data=[('1', 'sid')],
                                          columns=['id', 'name']))
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_get_records_with_schema(self, get_results_mock):
        self._create_test_table()

        operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
            sql=self.sql,
            table_name="test_airflow",
            task_id='hive_to_dynamodb_check',
            table_keys=['id'],
            dag=self.dag)

        operator.execute(None)

        table = self.hook.get_conn().Table('test_airflow')
        table.meta.client.get_waiter(
            'table_exists').wait(TableName='test_airflow')
        self.assertEqual(table.item_count, 1)

    @mock.patch('airflow.hooks.hive_hooks.HiveServer2Hook.get_pandas_df',
                return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')],
                                          columns=['id', 'name']))
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_pre_process_records_with_schema(self, get_results_mock):
        self._create_test_table()

        operator = airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
            sql=self.sql,
            table_name='test_airflow',
            task_id='hive_to_dynamodb_check',
            table_keys=['id'],
            pre_process=self.process_data,
            dag=self.dag)

        operator.execute(None)

        table = self.hook.get_conn().Table('test_airflow')
        table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
        # both source rows share key '1', so exactly one item survives
        self.assertEqual(table.item_count, 1)
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
jiangzhonglian/MachineLearning | src/py2.x/ml/6.SVM/svm-complete_Non-Kernel.py | 1 | 13440 | #!/usr/bin/python
# coding:utf8
"""
Created on Nov 4, 2010
Update on 2017-05-18
Chapter 5 source file for Machine Learing in Action
Author: Peter/geekidentity/片刻
GitHub: https://github.com/apachecn/AiLearning
"""
from __future__ import print_function
from numpy import *
import matplotlib.pyplot as plt
class optStruct:
    """Container for the state shared by the SMO routines.

    Holds the training data, labels, hyperparameters, and the per-sample
    alpha multipliers, bias and error cache that the optimizer mutates.
    """

    def __init__(self, dataMatIn, classLabels, C, toler):
        # Initialize the structure with the parameters
        self.X = dataMatIn                    # (m, n) sample matrix
        self.labelMat = classLabels           # (m, 1) label column vector
        self.C = C                            # slack penalty constant
        self.tol = toler                      # KKT tolerance
        self.m = shape(dataMatIn)[0]          # number of samples
        self.alphas = mat(zeros((self.m, 1)))
        self.b = 0
        # Error cache: first column is a "valid" flag, second the cached Ek.
        self.eCache = mat(zeros((self.m, 2)))
def loadDataSet(fileName):
    """Parse a tab-separated file into a feature matrix and a label list.

    Each line must contain two feature values followed by a class label.

    Args:
        fileName: path of the data file.
    Returns:
        dataMat: list of [x1, x2] feature pairs (floats).
        labelMat: list of class labels (floats).
    """
    dataMat = []
    labelMat = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(fileName) as fr:
        for line in fr:
            lineArr = line.strip().split('\t')
            dataMat.append([float(lineArr[0]), float(lineArr[1])])
            labelMat.append(float(lineArr[2]))
    return dataMat, labelMat
def selectJrand(i, m):
    """Pick a random alpha index different from i.

    Args:
        i: index of the first alpha.
        m: total number of alphas.
    Returns:
        j: a random integer in [0, m) with j != i.
    """
    j = i
    # re-draw until we land on an index other than i
    while j == i:
        j = int(random.uniform(0, m))
    return j
def clipAlpha(aj, H, L):
    """Clip aj into the closed interval [L, H].

    Args:
        aj: candidate value.
        H: upper bound.
        L: lower bound.
    Returns:
        aj clamped so that L <= aj <= H.
    """
    if aj > H:
        aj = H
    if L > aj:
        aj = L
    return aj
def calcEk(oS, k):
    """Return Ek, the prediction error of the current model on sample k.

    Ek = f(xk) - yk where f(xk) = sum_i alpha_i*y_i*<x_i, x_k> + b
    (linear kernel).  This is factored out because the full SMO algorithm
    needs it in several places.

    Args:
        oS: optStruct holding the optimizer state.
        k: row index of the sample to evaluate.
    Returns:
        Ek: predicted value minus the true label.
    """
    fXk = float(multiply(oS.alphas, oS.labelMat).T * (oS.X * oS.X[k, :].T)) + oS.b
    Ek = fXk - float(oS.labelMat[k])
    return Ek
def selectJ(i, oS, Ei):  # this is the second choice -heurstic, and calcs Ej
    """Heuristically choose the second (inner-loop) alpha.

    Picks the index j whose cached error maximizes |Ei - Ej| so that each
    optimization step takes the largest possible step size.  On the first
    pass (no valid cached errors yet) j is chosen at random.

    Args:
        i: index of the first alpha.
        oS: optStruct holding the optimizer state.
        Ei: prediction error of sample i.
    Returns:
        j: chosen second index.
        Ej: prediction error of sample j.
    """
    maxK = -1
    maxDeltaE = 0
    Ej = 0
    # Mark Ei as valid in the cache ("valid" means it has been computed).
    oS.eCache[i] = [1, Ei]
    # Row indices of all cache entries currently flagged valid.
    validEcacheList = nonzero(oS.eCache[:, 0].A)[0]
    if (len(validEcacheList)) > 1:
        # Loop over the valid errors and keep the one giving the biggest step.
        for k in validEcacheList:
            if k == i:
                continue  # don't calc for i, waste of time
            Ek = calcEk(oS, k)
            deltaE = abs(Ei - Ek)
            if (deltaE > maxDeltaE):
                maxK = k
                maxDeltaE = deltaE
                Ej = Ek
        return maxK, Ej
    else:
        # First pass: no usable cache yet, pick a random j.
        j = selectJrand(i, oS.m)
        Ej = calcEk(oS, j)
        return j, Ej
def updateEk(oS, k):  # after any alpha has changed update the new value in the cache
    """Recompute the error for sample k and store it as valid in the cache.

    Called after an alpha value has been optimized.

    Args:
        oS: optStruct holding the optimizer state.
        k: row index whose cached error must be refreshed.
    """
    Ek = calcEk(oS, k)
    oS.eCache[k] = [1, Ek]
def innerL(i, oS):
    """Inner loop of the full Platt SMO: try to optimize the pair (i, j).

    Args:
        i: index of the first alpha.
        oS: optStruct holding the optimizer state (mutated in place).
    Returns:
        1 if a pair of alphas was changed, 0 otherwise.
    """
    Ei = calcEk(oS, i)
    # Only optimize alphas violating the KKT conditions beyond tol:
    #   yi*f(i) >= 1 and alpha == 0      (outside the boundary)
    #   yi*f(i) == 1 and 0 < alpha < C   (on the boundary)
    #   yi*f(i) <= 1 and alpha == C      (between the boundaries)
    # 0 and C themselves cannot be optimized further.
    if ((oS.labelMat[i] * Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        # Choose j to maximize the step size |Ei - Ej|.
        j, Ej = selectJ(i, oS, Ei)
        alphaIold = oS.alphas[i].copy()
        alphaJold = oS.alphas[j].copy()
        # L and H confine alphas[j] to [0, C]; if L == H there is no room
        # to move, so bail out.
        if (oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L == H:
            print("L==H")
            return 0
        # eta is the optimal amount to change alphas[j]; if eta >= 0 there
        # is no usable descent direction (see Li Hang, "Statistical
        # Learning Methods", pp. 125-128, SMO section).
        eta = 2.0 * oS.X[i, :] * oS.X[j, :].T - oS.X[i, :] * oS.X[i, :].T - oS.X[j, :] * oS.X[j, :].T
        if eta >= 0:
            print("eta>=0")
            return 0
        # Compute the new alphas[j], clip it into [L, H] and refresh the cache.
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej) / eta
        oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
        updateEk(oS, j)
        # If alphas[j] barely moved, give up on this pair.
        if (abs(oS.alphas[j] - alphaJold) < 0.00001):
            print("j not moving enough")
            return 0
        # Move alphas[i] by the same amount in the opposite direction.
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alphaJold - oS.alphas[j])
        updateEk(oS, i)
        # Recompute the threshold b for the optimized pair:
        #   b1 - b = (y1 - y) - sum_i yi*(a1 - a)*(xi*x1)
        # (two subtraction terms because both i and j changed).
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i, :] * oS.X[i, :].T - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.X[i, :] * oS.X[j, :].T
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i, :] * oS.X[j, :].T - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.X[j, :] * oS.X[j, :].T
        # Use whichever alpha is strictly inside (0, C); otherwise average.
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]):
            oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]):
            oS.b = b2
        else:
            oS.b = (b1 + b2) / 2.0
        return 1
    else:
        return 0
def smoP(dataMatIn, classLabels, C, toler, maxIter):
    """Outer loop of the full Platt SMO algorithm.

    Alternates between sweeps over the whole dataset and sweeps over the
    non-bound alphas (0 < alpha < C) until no pair changes or maxIter
    sweeps have been performed.

    Args:
        dataMatIn: feature matrix-like, one row per sample.
        classLabels: sequence of +1/-1 class labels.
        C: slack constant; balances maximizing the margin against keeping
           most functional margins below 1.0.  Tune it for different results.
        toler: KKT tolerance.
        maxIter: maximum number of sweeps before giving up.
    Returns:
        b: scalar bias of the model.
        alphas: (m, 1) matrix of Lagrange multipliers.
    """
    # Build the shared optimizer state.
    oS = optStruct(mat(dataMatIn), mat(classLabels).transpose(), C, toler)
    iter = 0
    entireSet = True
    alphaPairsChanged = 0
    # Keep sweeping while under maxIter and either the last sweep changed
    # some pair or we are about to re-scan the entire set.  Stops when a
    # full scan of all alphas leaves every pair unchanged.
    while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
        alphaPairsChanged = 0
        if entireSet:
            # Full pass over every sample.
            for i in range(oS.m):
                # count a pair if one was changed
                alphaPairsChanged += innerL(i, oS)
                print("fullSet, iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged))
            iter += 1
        else:
            # Pass over the non-bound alphas only (neither 0 nor C).
            nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i, oS)
                print("non-bound, iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged))
            iter += 1
        # After a full sweep switch to non-bound sweeps; fall back to a full
        # sweep when the non-bound sweep changed nothing.
        if entireSet:
            entireSet = False  # toggle entire set loop
        elif (alphaPairsChanged == 0):
            entireSet = True
        print("iteration number: %d" % iter)
    return oS.b, oS.alphas
def calcWs(alphas, dataArr, classLabels):
    """Recover the weight vector w = sum_i alpha_i * y_i * x_i.

    Args:
        alphas: Lagrange multipliers, one per sample.
        dataArr: feature data, one row per sample.
        classLabels: sequence of class labels.
    Returns:
        w: (n, 1) array of regression coefficients.
    """
    X = mat(dataArr)
    labelMat = mat(classLabels).transpose()
    m, n = shape(X)
    w = zeros((n, 1))
    # Only support vectors (alpha > 0) actually contribute to the sum.
    for i in range(m):
        w += multiply(alphas[i] * labelMat[i], X[i, :].T)
    return w
def plotfig_SVM(xArr, yArr, ws, b, alphas):
    """Plot the data, the decision boundary and the support vectors.

    References:
        http://blog.csdn.net/maoersong/article/details/24315633
        http://www.cnblogs.com/JustForCS/p/5283489.html
        http://blog.csdn.net/kkxgx/article/details/6951959
    """
    xMat = mat(xArr)
    yMat = mat(yArr)

    # b is a 1x1 matrix; convert to array and index [0] to get shape (1,).
    b = array(b)[0]

    fig = plt.figure()
    ax = fig.add_subplot(111)

    # note the use of flatten() to feed 1-d data to scatter
    ax.scatter(xMat[:, 0].flatten().A[0], xMat[:, 1].flatten().A[0])

    # x range chosen from the spread of dataArr[:, 0]
    x = arange(-1.0, 10.0, 0.1)

    # from x.w + b = 0, i.e. w0*x1 + w1*x2 + b = 0, solve for x2 (the y axis)
    y = (-b - ws[0, 0] * x) / ws[1, 0]
    ax.plot(x, y)

    # color the two classes differently
    for i in range(shape(yMat[0, :])[1]):
        if yMat[0, i] > 0:
            ax.plot(xMat[i, 0], xMat[i, 1], 'cx')
        else:
            ax.plot(xMat[i, 0], xMat[i, 1], 'kp')

    # mark the support vectors (alpha > 0) in red
    for i in range(100):
        if alphas[i] > 0.0:
            ax.plot(xMat[i, 0], xMat[i, 1], 'ro')
    plt.show()
if __name__ == "__main__":
    # Load features and labels.
    dataArr, labelArr = loadDataSet('data/6.SVM/testSet.txt')
    # print labelArr

    # b is the bias, alphas are the Lagrange multipliers.
    b, alphas = smoP(dataArr, labelArr, 0.6, 0.001, 40)
    # bug fix: the original printed the literal '/n/n/n' (forward slashes);
    # '\n' is the newline escape that was intended.
    print('\n\n\n')
    print('b=', b)
    print('alphas[alphas>0]=', alphas[alphas > 0])
    print('shape(alphas[alphas > 0])=', shape(alphas[alphas > 0]))
    for i in range(100):
        if alphas[i] > 0:
            print(dataArr[i], labelArr[i])
    # Plot the result.
    ws = calcWs(alphas, dataArr, labelArr)
    plotfig_SVM(dataArr, labelArr, ws, b, alphas)
| gpl-3.0 |
Titan-C/sympy | sympy/physics/quantum/circuitplot.py | 6 | 12937 | """Matplotlib based plotting of quantum circuits.
Todo:
* Optimize printing of large circuits.
* Get this to work with single gates.
* Do a better job checking the form of circuits to make sure it is a Mul of
Gates.
* Get multi-target gates plotting.
* Get initial and final states to plot.
* Get measurements to plot. Might need to rethink measurement as a gate
issue.
* Get scale and figsize to be handled in a better way.
* Write some tests/examples!
"""
from __future__ import print_function, division
from sympy import Mul
from sympy.core.compatibility import u, range
from sympy.external import import_module
from sympy.physics.quantum.gate import Gate, OneQubitGate, CGate, CGateS
from sympy.core.core import BasicMeta
from sympy.core.assumptions import ManagedProperties
__all__ = [
'CircuitPlot',
'circuit_plot',
'labeller',
'Mz',
'Mx',
'CreateOneQubitGate',
'CreateCGate',
]
np = import_module('numpy')
matplotlib = import_module(
'matplotlib', __import__kwargs={'fromlist': ['pyplot']},
catch=(RuntimeError,)) # This is raised in environments that have no display.
if not np or not matplotlib:
    # Fallback stubs: raise a helpful error instead of NameError when the
    # optional plotting dependencies are missing.
    class CircuitPlot(object):
        def __init__(*args, **kwargs):
            raise ImportError('numpy or matplotlib not available.')

    def circuit_plot(*args, **kwargs):
        raise ImportError('numpy or matplotlib not available.')
else:
    pyplot = matplotlib.pyplot
    Line2D = matplotlib.lines.Line2D
    Circle = matplotlib.patches.Circle

    #from matplotlib import rc
    #rc('text',usetex=True)
class CircuitPlot(object):
"""A class for managing a circuit plot."""
scale = 1.0
fontsize = 20.0
linewidth = 1.0
control_radius = 0.05
not_radius = 0.15
swap_delta = 0.05
labels = []
inits = {}
label_buffer = 0.5
def __init__(self, c, nqubits, **kwargs):
self.circuit = c
self.ngates = len(self.circuit.args)
self.nqubits = nqubits
self.update(kwargs)
self._create_grid()
self._create_figure()
self._plot_wires()
self._plot_gates()
self._finish()
def update(self, kwargs):
"""Load the kwargs into the instance dict."""
self.__dict__.update(kwargs)
def _create_grid(self):
"""Create the grid of wires."""
scale = self.scale
wire_grid = np.arange(0.0, self.nqubits*scale, scale, dtype=float)
gate_grid = np.arange(0.0, self.ngates*scale, scale, dtype=float)
self._wire_grid = wire_grid
self._gate_grid = gate_grid
def _create_figure(self):
"""Create the main matplotlib figure."""
self._figure = pyplot.figure(
figsize=(self.ngates*self.scale, self.nqubits*self.scale),
facecolor='w',
edgecolor='w'
)
ax = self._figure.add_subplot(
1, 1, 1,
frameon=True
)
ax.set_axis_off()
offset = 0.5*self.scale
ax.set_xlim(self._gate_grid[0] - offset, self._gate_grid[-1] + offset)
ax.set_ylim(self._wire_grid[0] - offset, self._wire_grid[-1] + offset)
ax.set_aspect('equal')
self._axes = ax
def _plot_wires(self):
"""Plot the wires of the circuit diagram."""
xstart = self._gate_grid[0]
xstop = self._gate_grid[-1]
xdata = (xstart - self.scale, xstop + self.scale)
for i in range(self.nqubits):
ydata = (self._wire_grid[i], self._wire_grid[i])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
if self.labels:
init_label_buffer = 0
if self.inits.get(self.labels[i]): init_label_buffer = 0.25
self._axes.text(
xdata[0]-self.label_buffer-init_label_buffer,ydata[0],
render_label(self.labels[i],self.inits),
size=self.fontsize,
color='k',ha='center',va='center')
self._plot_measured_wires()
def _plot_measured_wires(self):
ismeasured = self._measurements()
xstop = self._gate_grid[-1]
dy = 0.04 # amount to shift wires when doubled
# Plot doubled wires after they are measured
for im in ismeasured:
xdata = (self._gate_grid[ismeasured[im]],xstop+self.scale)
ydata = (self._wire_grid[im]+dy,self._wire_grid[im]+dy)
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
# Also double any controlled lines off these wires
for i,g in enumerate(self._gates()):
if isinstance(g, CGate) or isinstance(g, CGateS):
wires = g.controls + g.targets
for wire in wires:
if wire in ismeasured and \
self._gate_grid[i] > self._gate_grid[ismeasured[wire]]:
ydata = min(wires), max(wires)
xdata = self._gate_grid[i]-dy, self._gate_grid[i]-dy
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def _gates(self):
"""Create a list of all gates in the circuit plot."""
gates = []
if isinstance(self.circuit, Mul):
for g in reversed(self.circuit.args):
if isinstance(g, Gate):
gates.append(g)
elif isinstance(self.circuit, Gate):
gates.append(self.circuit)
return gates
def _plot_gates(self):
"""Iterate through the gates and plot each of them."""
for i, gate in enumerate(self._gates()):
gate.plot_gate(self, i)
def _measurements(self):
"""Return a dict {i:j} where i is the index of the wire that has
been measured, and j is the gate where the wire is measured.
"""
ismeasured = {}
for i,g in enumerate(self._gates()):
if getattr(g,'measurement',False):
for target in g.targets:
if target in ismeasured:
if ismeasured[target] > i:
ismeasured[target] = i
else:
ismeasured[target] = i
return ismeasured
def _finish(self):
# Disable clipping to make panning work well for large circuits.
for o in self._figure.findobj():
o.set_clip_on(False)
def one_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a single qubit gate."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def two_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a two qubit gate. Doesn't work yet.
"""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]+0.5
print(self._gate_grid)
print(self._wire_grid)
obj = self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def control_line(self, gate_idx, min_wire, max_wire):
"""Draw a vertical control line."""
xdata = (self._gate_grid[gate_idx], self._gate_grid[gate_idx])
ydata = (self._wire_grid[min_wire], self._wire_grid[max_wire])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def control_point(self, gate_idx, wire_idx):
"""Draw a control point."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.control_radius
c = Circle(
(x, y),
radius*self.scale,
ec='k',
fc='k',
fill=True,
lw=self.linewidth
)
self._axes.add_patch(c)
def not_point(self, gate_idx, wire_idx):
"""Draw a NOT gates as the circle with plus in the middle."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.not_radius
c = Circle(
(x, y),
radius,
ec='k',
fc='w',
fill=False,
lw=self.linewidth
)
self._axes.add_patch(c)
l = Line2D(
(x, x), (y - radius, y + radius),
color='k',
lw=self.linewidth
)
self._axes.add_line(l)
def swap_point(self, gate_idx, wire_idx):
"""Draw a swap point as a cross."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
d = self.swap_delta
l1 = Line2D(
(x - d, x + d),
(y - d, y + d),
color='k',
lw=self.linewidth
)
l2 = Line2D(
(x - d, x + d),
(y + d, y - d),
color='k',
lw=self.linewidth
)
self._axes.add_line(l1)
self._axes.add_line(l2)
def circuit_plot(c, nqubits, **kwargs):
    """Draw the circuit diagram for the circuit with nqubits.

    Parameters
    ==========

    c : circuit
        The circuit to plot. Should be a product of Gate instances.
    nqubits : int
        The number of qubits to include in the circuit. Must be at least
        as big as the largest ``min_qubits`` of the gates.
    """
    return CircuitPlot(c, nqubits, **kwargs)
def render_label(label, inits=None):
    """Slightly more flexible way to render labels.

    Parameters
    ==========

    label : str
        The wire label to render as a ket.
    inits : dict, optional
        Mapping from label to an initial-state string. When ``label`` has
        a truthy entry, the rendered ket also shows the initialization.

    >>> from sympy.physics.quantum.circuitplot import render_label
    >>> render_label('q0')
    '$|q0\\\\rangle$'
    >>> render_label('q0', {'q0':'0'})
    '$|q0\\\\rangle=|0\\\\rangle$'
    """
    # Default changed from a mutable ``{}`` default argument to None
    # (same behavior; avoids the shared-mutable-default pitfall).
    init = (inits or {}).get(label)
    if init:
        return r'$|%s\rangle=|%s\rangle$' % (label, init)
    return r'$|%s\rangle$' % label
def labeller(n, symbol='q'):
    """Autogenerate labels for wires of quantum circuits.

    Parameters
    ==========

    n : int
        number of qubits in the circuit
    symbol : string
        A character string to precede all gate labels. E.g. 'q_0', 'q_1', etc.

    >>> from sympy.physics.quantum.circuitplot import labeller
    >>> labeller(2)
    ['q_1', 'q_0']
    >>> labeller(3,'j')
    ['j_2', 'j_1', 'j_0']
    """
    # Wires are labelled from the highest index down to zero.
    return ['%s_%d' % (symbol, index) for index in reversed(range(n))]
class Mz(OneQubitGate):
    """Mock-up of a z measurement gate.

    This is in circuitplot rather than gate.py because it's not a real
    gate, it just draws one.
    """
    # Flag read via getattr(g, 'measurement', False) when collecting
    # measured wires, so drawing code doubles the wire after this gate.
    measurement = True
    gate_name='Mz'
    gate_name_latex=u'M_z'
class Mx(OneQubitGate):
    """Mock-up of an x measurement gate.

    This is in circuitplot rather than gate.py because it's not a real
    gate, it just draws one.
    """
    # Flag read via getattr(g, 'measurement', False) when collecting
    # measured wires, so drawing code doubles the wire after this gate.
    measurement = True
    gate_name='Mx'
    gate_name_latex=u'M_x'
class CreateOneQubitGate(ManagedProperties):
    """Metaclass factory for one-qubit gate classes.

    Calling ``CreateOneQubitGate('X')`` builds and returns a new class
    named ``XGate`` deriving from ``OneQubitGate``, with its display
    names set from the arguments.
    """
    def __new__(mcl, name, latexname=None):
        # Fall back to the plain name when no LaTeX name is given.
        if not latexname:
            latexname = name
        return BasicMeta.__new__(mcl, name + "Gate", (OneQubitGate,),
            {'gate_name': name, 'gate_name_latex': latexname})
def CreateCGate(name, latexname=None):
    """Use a lexical closure to make a controlled gate.

    Builds a fresh one-qubit gate class for ``name`` and returns a
    function ``ControlledGate(ctrls, target)`` that wraps an instance of
    it in a ``CGate`` with the given control wires.
    """
    if not latexname:
        latexname = name
    base_gate = CreateOneQubitGate(name, latexname)
    def ControlledGate(ctrls, target):
        return CGate(tuple(ctrls), base_gate(target))
    return ControlledGate
| bsd-3-clause |
LiaoPan/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])
plt.show()
| bsd-3-clause |
joernhees/scikit-learn | sklearn/ensemble/weight_boosting.py | 29 | 41090 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <noel@dawe.me>
# Gilles Louppe <g.louppe@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# Arnaud Joly <arnaud.v.joly@gmail.com>
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..utils.extmath import stable_cumsum
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
# Public API of this module.
__all__ = [
    'AdaBoostClassifier',
    'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
    """Base class for AdaBoost estimators.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 estimator_params=tuple(),
                 learning_rate=1.,
                 random_state=None):
        super(BaseWeightBoosting, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)
        self.learning_rate = learning_rate
        self.random_state = random_state

    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier/regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
            forced to DTYPE from tree._tree if the base classifier of this
            ensemble weighted boosting classifier is a tree or forest.

        y : array-like of shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check parameters
        if self.learning_rate <= 0:
            raise ValueError("learning_rate must be greater than zero")

        # Trees and forests are fit on float32 (DTYPE) CSC data; any other
        # base estimator takes whatever check_X_y produces.
        if (self.base_estimator is None or
                isinstance(self.base_estimator, (BaseDecisionTree,
                                                 BaseForest))):
            dtype = DTYPE
            accept_sparse = 'csc'
        else:
            dtype = None
            accept_sparse = ['csr', 'csc']

        X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
                         y_numeric=is_regressor(self))

        if sample_weight is None:
            # Initialize weights to 1 / n_samples
            sample_weight = np.empty(X.shape[0], dtype=np.float64)
            sample_weight[:] = 1. / X.shape[0]
        else:
            sample_weight = check_array(sample_weight, ensure_2d=False)
            # Normalize existing weights
            sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)

            # Check that the sample weights sum is positive
            if sample_weight.sum() <= 0:
                raise ValueError(
                    "Attempting to fit with a non-positive "
                    "weighted number of samples.")

        # Check parameters
        self._validate_estimator()

        # Clear any previous fit results
        self.estimators_ = []
        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
        self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)

        random_state = check_random_state(self.random_state)

        for iboost in range(self.n_estimators):
            # Boosting step: the subclass returns the updated sample weights,
            # this estimator's ensemble weight, and its weighted error.
            sample_weight, estimator_weight, estimator_error = self._boost(
                iboost,
                X, y,
                sample_weight,
                random_state)

            # Early termination
            if sample_weight is None:
                break

            self.estimator_weights_[iboost] = estimator_weight
            self.estimator_errors_[iboost] = estimator_error

            # Stop if error is zero
            if estimator_error == 0:
                break

            sample_weight_sum = np.sum(sample_weight)

            # Stop if the sum of sample weights has become non-positive
            if sample_weight_sum <= 0:
                break

            if iboost < self.n_estimators - 1:
                # Normalize
                sample_weight /= sample_weight_sum

        return self

    @abstractmethod
    def _boost(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost.

        Warning: This method needs to be overridden by subclasses.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples]
            The current sample weights.

        random_state : numpy.RandomState
            The current random number generator

        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        pass

    def staged_score(self, X, y, sample_weight=None):
        """Return staged scores for X, y.

        This generator method yields the ensemble score after each iteration of
        boosting and therefore allows monitoring, such as to determine the
        score on a test set after each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like, shape = [n_samples]
            Labels for X.

        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        z : generator of float
            Accuracy (classifiers) or R^2 score (regressors) after each
            boosting iteration.
        """
        for y_pred in self.staged_predict(X):
            if isinstance(self, ClassifierMixin):
                yield accuracy_score(y, y_pred, sample_weight=sample_weight)
            else:
                yield r2_score(y, y_pred, sample_weight=sample_weight)

    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
        feature).

        The value is the estimator-weight-weighted average of each fitted
        sub-estimator's ``feature_importances_``.

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")

        try:
            norm = self.estimator_weights_.sum()
            return (sum(weight * clf.feature_importances_ for weight, clf
                    in zip(self.estimator_weights_, self.estimators_))
                    / norm)

        except AttributeError:
            raise AttributeError(
                "Unable to compute feature importances "
                "since base_estimator does not have a "
                "feature_importances_ attribute")

    def _validate_X_predict(self, X):
        """Ensure that X is in the proper format"""
        # Mirrors fit(): trees/forests predict on float32 CSR input.
        if (self.base_estimator is None or
                isinstance(self.base_estimator,
                           (BaseDecisionTree, BaseForest))):
            X = check_array(X, accept_sparse='csr', dtype=DTYPE)

        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])

        return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
    """An AdaBoost classifier.

    An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
    classifier on the original dataset and then fits additional copies of the
    classifier on the same dataset but where the weights of incorrectly
    classified instances are adjusted such that subsequent classifiers focus
    more on difficult cases.

    This class implements the algorithm known as AdaBoost-SAMME [2].

    Read more in the :ref:`User Guide <adaboost>`.

    Parameters
    ----------
    base_estimator : object, optional (default=DecisionTreeClassifier)
        The base estimator from which the boosted ensemble is built.
        Support for sample weighting is required, as well as proper `classes_`
        and `n_classes_` attributes.

    n_estimators : integer, optional (default=50)
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.

    learning_rate : float, optional (default=1.)
        Learning rate shrinks the contribution of each classifier by
        ``learning_rate``. There is a trade-off between ``learning_rate`` and
        ``n_estimators``.

    algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
        If 'SAMME.R' then use the SAMME.R real boosting algorithm.
        ``base_estimator`` must support calculation of class probabilities.
        If 'SAMME' then use the SAMME discrete boosting algorithm.
        The SAMME.R algorithm typically converges faster than SAMME,
        achieving a lower test error with fewer boosting iterations.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    estimators_ : list of classifiers
        The collection of fitted sub-estimators.

    classes_ : array of shape = [n_classes]
        The classes labels.

    n_classes_ : int
        The number of classes.

    estimator_weights_ : array of floats
        Weights for each estimator in the boosted ensemble.

    estimator_errors_ : array of floats
        Classification error for each estimator in the boosted
        ensemble.

    feature_importances_ : array of shape = [n_features]
        The feature importances if supported by the ``base_estimator``.

    See also
    --------
    AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier

    References
    ----------
    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           on-Line Learning and an Application to Boosting", 1995.

    .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 learning_rate=1.,
                 algorithm='SAMME.R',
                 random_state=None):

        super(AdaBoostClassifier, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state)

        self.algorithm = algorithm

    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            ``1 / n_samples``.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check that algorithm is supported
        if self.algorithm not in ('SAMME', 'SAMME.R'):
            raise ValueError("algorithm %s is not supported" % self.algorithm)

        # Fit
        return super(AdaBoostClassifier, self).fit(X, y, sample_weight)

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super(AdaBoostClassifier, self)._validate_estimator(
            default=DecisionTreeClassifier(max_depth=1))

        #  SAMME-R requires predict_proba-enabled base estimators
        if self.algorithm == 'SAMME.R':
            if not hasattr(self.base_estimator_, 'predict_proba'):
                raise TypeError(
                    "AdaBoostClassifier with algorithm='SAMME.R' requires "
                    "that the weak learner supports the calculation of class "
                    "probabilities with a predict_proba method.\n"
                    "Please change the base estimator or set "
                    "algorithm='SAMME' instead.")
        if not has_fit_parameter(self.base_estimator_, "sample_weight"):
            raise ValueError("%s doesn't support sample_weight."
                             % self.base_estimator_.__class__.__name__)

    def _boost(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost.

        Perform a single boost according to the real multi-class SAMME.R
        algorithm or to the discrete SAMME algorithm and return the updated
        sample weights.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples]
            The current sample weights.

        random_state : numpy.RandomState
            The current random number generator

        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        estimator_error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        if self.algorithm == 'SAMME.R':
            return self._boost_real(iboost, X, y, sample_weight, random_state)

        else:  # elif self.algorithm == "SAMME":
            return self._boost_discrete(iboost, X, y, sample_weight,
                                        random_state)

    def _boost_real(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost using the SAMME.R real algorithm."""
        estimator = self._make_estimator(random_state=random_state)

        estimator.fit(X, y, sample_weight=sample_weight)

        y_predict_proba = estimator.predict_proba(X)

        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)

        y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
                                       axis=0)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.

        # Construct y coding as described in Zhu et al [2]:
        #
        #    y_k = 1 if c == k else -1 / (K - 1)
        #
        # where K == n_classes_ and c, k in [0, K) are indices along the second
        # axis of the y coding with c being the index corresponding to the true
        # class label.
        n_classes = self.n_classes_
        classes = self.classes_
        y_codes = np.array([-1. / (n_classes - 1), 1.])
        y_coding = y_codes.take(classes == y[:, np.newaxis])

        # Displace zero probabilities so the log is defined.
        # Also fix negative elements which may occur with
        # negative sample weights.
        proba = y_predict_proba  # alias for readability
        proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps

        # Boost weight using multi-class AdaBoost SAMME.R alg
        estimator_weight = (-1. * self.learning_rate
                            * (((n_classes - 1.) / n_classes) *
                               inner1d(y_coding, np.log(y_predict_proba))))

        # Only boost the weights if it will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))

        # SAMME.R uses a constant ensemble weight of 1; the real-valued
        # per-sample estimator_weight above only reweights the samples.
        return sample_weight, 1., estimator_error

    def _boost_discrete(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost using the SAMME discrete algorithm."""
        estimator = self._make_estimator(random_state=random_state)

        estimator.fit(X, y, sample_weight=sample_weight)

        y_predict = estimator.predict(X)

        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.

        n_classes = self.n_classes_

        # Stop if the error is at least as bad as random guessing
        if estimator_error >= 1. - (1. / n_classes):
            self.estimators_.pop(-1)
            if len(self.estimators_) == 0:
                raise ValueError('BaseClassifier in AdaBoostClassifier '
                                 'ensemble is worse than random, ensemble '
                                 'can not be fit.')
            return None, None, None

        # Boost weight using multi-class AdaBoost SAMME alg
        estimator_weight = self.learning_rate * (
            np.log((1. - estimator_error) / estimator_error) +
            np.log(n_classes - 1.))

        # Only boost the weights if I will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight * incorrect *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))

        return sample_weight, estimator_weight, estimator_error

    def predict(self, X):
        """Predict classes for X.

        The predicted class of an input sample is computed as the weighted mean
        prediction of the classifiers in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        pred = self.decision_function(X)

        if self.n_classes_ == 2:
            # Binary: the sign of the 1-D decision value picks the class.
            return self.classes_.take(pred > 0, axis=0)

        return self.classes_.take(np.argmax(pred, axis=1), axis=0)

    def staged_predict(self, X):
        """Return staged predictions for X.

        The predicted class of an input sample is computed as the weighted mean
        prediction of the classifiers in the ensemble.

        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array, shape = [n_samples]
            The predicted classes.
        """
        n_classes = self.n_classes_
        classes = self.classes_

        if n_classes == 2:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(pred > 0, axis=0))

        else:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(
                    np.argmax(pred, axis=1), axis=0))

    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None

        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            pred = sum(_samme_proba(estimator, n_classes, X)
                       for estimator in self.estimators_)
        else:  # self.algorithm == "SAMME"
            pred = sum((estimator.predict(X) == classes).T * w
                       for estimator, w in zip(self.estimators_,
                                               self.estimator_weights_))

        pred /= self.estimator_weights_.sum()
        if n_classes == 2:
            # Collapse the two symmetric columns into one signed score.
            pred[:, 0] *= -1
            return pred.sum(axis=1)
        return pred

    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each boosting iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each boosting iteration.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None
        norm = 0.

        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            norm += weight

            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_pred = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_pred = estimator.predict(X)
                current_pred = (current_pred == classes).T * weight

            if pred is None:
                pred = current_pred
            else:
                pred += current_pred

            if n_classes == 2:
                # Copy before flipping so the accumulator stays intact
                # for the next iteration.
                tmp_pred = np.copy(pred)
                tmp_pred[:, 0] *= -1
                yield (tmp_pred / norm).sum(axis=1)
            else:
                yield pred / norm

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        check_is_fitted(self, "n_classes_")

        n_classes = self.n_classes_
        X = self._validate_X_predict(X)

        if n_classes == 1:
            return np.ones((X.shape[0], 1))

        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            proba = sum(_samme_proba(estimator, n_classes, X)
                        for estimator in self.estimators_)
        else:  # self.algorithm == "SAMME"
            proba = sum(estimator.predict_proba(X) * w
                        for estimator, w in zip(self.estimators_,
                                                self.estimator_weights_))

        proba /= self.estimator_weights_.sum()
        proba = np.exp((1. / (n_classes - 1)) * proba)
        # Normalize rows to sum to one; guard empty rows against
        # division by zero.
        normalizer = proba.sum(axis=1)[:, np.newaxis]
        normalizer[normalizer == 0.0] = 1.0
        proba /= normalizer

        return proba

    def staged_predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        This generator method yields the ensemble predicted class probabilities
        after each iteration of boosting and therefore allows monitoring, such
        as to determine the predicted class probabilities on a test set after
        each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        # NOTE(review): unlike predict_proba, there is no check_is_fitted
        # guard here — confirm whether this is intentional.
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        proba = None
        norm = 0.

        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            norm += weight

            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_proba = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_proba = estimator.predict_proba(X) * weight

            if proba is None:
                proba = current_proba
            else:
                proba += current_proba

            real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
            normalizer = real_proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            real_proba /= normalizer

            yield real_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the weighted mean predicted class log-probabilities of the classifiers
        in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
    """An AdaBoost regressor.
    An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
    regressor on the original dataset and then fits additional copies of the
    regressor on the same dataset but where the weights of instances are
    adjusted according to the error of the current prediction. As such,
    subsequent regressors focus more on difficult cases.
    This class implements the algorithm known as AdaBoost.R2 [2].
    Read more in the :ref:`User Guide <adaboost>`.
    Parameters
    ----------
    base_estimator : object, optional (default=DecisionTreeRegressor)
        The base estimator from which the boosted ensemble is built.
        Support for sample weighting is required.
    n_estimators : integer, optional (default=50)
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.
    learning_rate : float, optional (default=1.)
        Learning rate shrinks the contribution of each regressor by
        ``learning_rate``. There is a trade-off between ``learning_rate`` and
        ``n_estimators``.
    loss : {'linear', 'square', 'exponential'}, optional (default='linear')
        The loss function to use when updating the weights after each
        boosting iteration.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    estimators_ : list of classifiers
        The collection of fitted sub-estimators.
    estimator_weights_ : array of floats
        Weights for each estimator in the boosted ensemble.
    estimator_errors_ : array of floats
        Regression error for each estimator in the boosted ensemble.
    feature_importances_ : array of shape = [n_features]
        The feature importances if supported by the ``base_estimator``.
    See also
    --------
    AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
    References
    ----------
    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           on-Line Learning and an Application to Boosting", 1995.
    .. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
    """
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 learning_rate=1.,
                 loss='linear',
                 random_state=None):
        super(AdaBoostRegressor, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state)
        self.loss = loss
        # NOTE(review): random_state is already passed to (and presumably
        # stored by) the base-class __init__ above; this re-assignment looks
        # redundant — confirm before removing.
        self.random_state = random_state
    def fit(self, X, y, sample_weight=None):
        """Build a boosted regressor from the training set (X, y).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.
        y : array-like of shape = [n_samples]
            The target values (real numbers).
        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.
        Returns
        -------
        self : object
            Returns self.
        """
        # Check loss before delegating so the user gets a clear error early.
        if self.loss not in ('linear', 'square', 'exponential'):
            raise ValueError(
                "loss must be 'linear', 'square', or 'exponential'")
        # Fit: the boosting loop itself lives in the base class.
        return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super(AdaBoostRegressor, self)._validate_estimator(
            default=DecisionTreeRegressor(max_depth=3))
    def _boost(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost for regression
        Perform a single boost according to the AdaBoost.R2 algorithm and
        return the updated sample weights.
        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.
        y : array-like of shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like of shape = [n_samples]
            The current sample weights.
        random_state : numpy.RandomState
            The current random number generator
        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.
        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.
        estimator_error : float
            The regression error for the current boost.
            If None then boosting has terminated early.
        """
        estimator = self._make_estimator(random_state=random_state)
        # Weighted sampling of the training set with replacement:
        # draw uniforms and invert the weight CDF (inverse-transform sampling).
        # For NumPy >= 1.7.0 use np.random.choice
        cdf = stable_cumsum(sample_weight)
        cdf /= cdf[-1]
        uniform_samples = random_state.random_sample(X.shape[0])
        bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
        # searchsorted returns a scalar
        bootstrap_idx = np.array(bootstrap_idx, copy=False)
        # Fit on the bootstrapped sample and obtain a prediction
        # for all samples in the training set
        estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
        y_predict = estimator.predict(X)
        error_vect = np.abs(y_predict - y)
        error_max = error_vect.max()
        # Normalize errors to [0, 1] so the chosen loss can be applied.
        if error_max != 0.:
            error_vect /= error_max
        if self.loss == 'square':
            error_vect **= 2
        elif self.loss == 'exponential':
            error_vect = 1. - np.exp(- error_vect)
        # Calculate the average loss (weighted by the current sample weights).
        estimator_error = (sample_weight * error_vect).sum()
        if estimator_error <= 0:
            # Stop if fit is perfect
            return sample_weight, 1., 0.
        elif estimator_error >= 0.5:
            # Worse than the AdaBoost.R2 threshold: terminate boosting.
            # Discard current estimator only if it isn't the only one
            if len(self.estimators_) > 1:
                self.estimators_.pop(-1)
            return None, None, None
        # beta < 1 here since estimator_error < 0.5.
        beta = estimator_error / (1. - estimator_error)
        # Boost weight using AdaBoost.R2 alg
        estimator_weight = self.learning_rate * np.log(1. / beta)
        # No need to reweight after the final boosting iteration.
        if not iboost == self.n_estimators - 1:
            sample_weight *= np.power(
                beta,
                (1. - error_vect) * self.learning_rate)
        return sample_weight, estimator_weight, estimator_error
    def _get_median_predict(self, X, limit):
        """Weighted-median prediction over the first `limit` estimators."""
        # Evaluate predictions of all estimators
        predictions = np.array([
            est.predict(X) for est in self.estimators_[:limit]]).T
        # Sort the predictions
        sorted_idx = np.argsort(predictions, axis=1)
        # Find index of median prediction for each sample: the first estimator
        # (in sorted-prediction order) whose cumulative weight reaches half
        # the total weight.
        weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1)
        median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
        median_idx = median_or_above.argmax(axis=1)
        median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
        # Return median predictions
        return predictions[np.arange(X.shape[0]), median_estimators]
    def predict(self, X):
        """Predict regression value for X.
        The predicted regression value of an input sample is computed
        as the weighted median prediction of the classifiers in the ensemble.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.
        Returns
        -------
        y : array of shape = [n_samples]
            The predicted regression values.
        """
        check_is_fitted(self, "estimator_weights_")
        X = self._validate_X_predict(X)
        return self._get_median_predict(X, len(self.estimators_))
    def staged_predict(self, X):
        """Return staged predictions for X.
        The predicted regression value of an input sample is computed
        as the weighted median prediction of the classifiers in the ensemble.
        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.
        Returns
        -------
        y : generator of array, shape = [n_samples]
            The predicted regression values.
        """
        check_is_fitted(self, "estimator_weights_")
        X = self._validate_X_predict(X)
        # enumerate(..., 1) so `i` is the number of estimators used so far.
        for i, _ in enumerate(self.estimators_, 1):
            yield self._get_median_predict(X, limit=i)
| bsd-3-clause |
NolanBecker/aima-python | grading/neuralNet-submissions.py | 4 | 2217 | import importlib
import traceback
from grading.util import roster, print_table
# from logic import FolKB
# from utils import expr
import os
from sklearn.neural_network import MLPClassifier
mlpc = MLPClassifier()
def indent(howMuch = 1):
    """Return an indentation string of 4 spaces per level.

    Mirrors the original loop-based behavior: any value of howMuch below 1
    still yields a single level (4 spaces).
    """
    # String multiplication replaces the original accumulate-in-a-loop build.
    return '    ' * max(howMuch, 1)
def tryOne(label, fAndP):
    """Fit one submission's classifier and report its mislabeled-point count.

    label -- name identifying the example (used only for reporting context).
    fAndP -- dict with key 'frame' (an object exposing .data and .target
             arrays) and an optional 'mlpc' classifier; when absent, the
             module-level default `mlpc` is used.
    """
    frame = fAndP['frame']
    # Keep the explicit membership test: evaluating the module-level `mlpc`
    # fallback eagerly (e.g. via dict.get) would fail when it is unavailable.
    if 'mlpc' in fAndP.keys():
        clf = fAndP['mlpc']
    else:
        clf = mlpc
    try:
        fit = clf.fit(frame.data, frame.target)
    except Exception:
        # BUG FIX: the original swallowed the failure and fell through to
        # `fit.predict`, raising NameError on an unbound `fit`. Report the
        # real error for this submission and stop grading it.
        traceback.print_exc()
        print('')
        return
    print('')
    y_pred = fit.predict(frame.data)
    print("Number of mislabeled points out of a total %d points : %d"
          % (len(frame.data), (frame.target != y_pred).sum()))
def tryExamples(examples):
    """Run every example: call its main() when it defines one, otherwise
    hand it to tryOne for fitting and scoring."""
    for name in examples:
        candidate = examples[name]
        entry = getattr(candidate, 'main', None)
        if entry is None:
            tryOne(name, candidate)
        else:
            candidate.main()
# Walk the class roster, import each student's myNN module, then grade it.
submissions = {}
scores = {}
message1 = 'Submissions that compile:'
root = os.getcwd()
for student in roster:
    try:
        # Working directory must be the student's folder for relative data paths.
        os.chdir(root + '/submissions/' + student)
        # http://stackoverflow.com/a/17136796/2619926
        mod = importlib.import_module('submissions.' + student + '.myNN')
        submissions[student] = mod.Examples
        message1 += ' ' + student
    except ImportError:
        # Missing/broken submission: silently excluded from the roster line.
        pass
    except:
        traceback.print_exc()
os.chdir(root)
print(message1)
print('----------------------------------------')
for student in roster:
    if not student in submissions.keys():
        continue
    # NOTE(review): scores[student] is initialized but never appended to,
    # so the summary below always prints [] = 0 — confirm whether scoring
    # was meant to be filled in by tryExamples.
    scores[student] = []
    try:
        examples = submissions[student]
        # NOTE(review): banner says 'Bayesian Networks' in a neural-net
        # grader — looks like a copy-paste from a sibling script; confirm.
        print('Bayesian Networks from:', student)
        tryExamples(examples)
    except:
        traceback.print_exc()
    print(student + ' scores ' + str(scores[student]) + ' = ' + str(sum(scores[student])))
print('----------------------------------------') | mit |
guillemborrell/gtable | tests/test_table_creation.py | 1 | 4044 | from gtable import Table
import numpy as np
import pandas as pd
def test_empty_table():
    """A freshly constructed Table holds no data columns."""
    table = Table()
    assert table.data == []
def test_simple_table():
    """Columns supplied as list or ndarray are both stored and fully indexed."""
    table = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
    assert table.to_dict()['a'][2] == 3
    dense_index = np.ones((2, 3), dtype=np.uint8)
    assert np.all(table.index == dense_index)
def test_hcat_table():
    """Appending columns grows the index; a longer column pads the others."""
    table = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
    table.add_column('c', [7, 8, 9])
    assert np.all(table.index == np.ones((3, 3), dtype=np.uint8))
    assert np.all(table.c.values == np.array([7, 8, 9]))
    table.add_column('d', [0, 1, 2, 3, 4])
    padded = np.array([[1, 1, 1, 0, 0],
                       [1, 1, 1, 0, 0],
                       [1, 1, 1, 0, 0],
                       [1, 1, 1, 1, 1]], dtype=np.uint8)
    assert np.all(table.index == padded)
def test_vcat_table():
    """Stacking merges the shared column and keeps unshared ones sparse."""
    top = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
    bottom = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])})
    top.stack(bottom)
    stacked = np.array([[1, 1, 1, 1, 1, 1],
                        [1, 1, 1, 0, 0, 0],
                        [0, 0, 0, 1, 1, 1]], dtype=np.uint8)
    assert np.all(top.index == stacked)
def test_compute_column():
    """A column computed from a dense column propagates to every record."""
    left = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
    right = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])})
    left.stack(right)
    left.c = left.a + left.a / 2
    expected = [
        {'a': 1, 'b': 4, 'c': 1.5},
        {'a': 2, 'b': 5, 'c': 3.0},
        {'a': 3, 'b': 6, 'c': 4.5},
        {'a': 1, 'd': 4, 'c': 1.5},
        {'a': 2, 'd': 5, 'c': 3.0},
        {'a': 3, 'd': 6, 'c': 4.5}]
    assert list(left.records()) == expected
def test_compute_wrong_size():
    """Combining columns with different support only fills their overlap."""
    left = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
    right = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])})
    left.stack(right)
    left.c = left.a + left.b / 2
    assert np.all(left.c.values == np.array([3, 4.5, 6]))
    assert np.all(left.c.index == np.array([1, 1, 1, 0, 0, 0]))
def test_add_array():
    """Assigning an array attribute creates a column of the array's length."""
    table = Table()
    table.a = np.arange(10)
    assert repr(table)[:13] == "<Table[ a[10]"
def test_add_one():
    """A length-1 column marks only the first row of the index."""
    table = Table({'a': pd.date_range('2000-01-01', freq='M', periods=10),
                   'b': np.random.randn(10)})
    table.add_column('schedule', np.array(['first']))
    sparse = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                       [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    assert np.all(table.index == sparse)
def test_vcat_heterogeneous():
    """Stacking tables with a sparse string column keeps the sparse layout."""
    base = Table({'a': pd.date_range('2000-01-01', freq='M', periods=3),
                  'b': np.random.randn(3)})
    base.add_column('schedule', np.array(['first']))
    clone = base.copy()
    clone.schedule.values[0] = 'second'
    base.stack(clone)
    layout = np.array([[1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1],
                       [1, 0, 0, 1, 0, 0]], dtype=np.uint8)
    assert np.all(base.index == layout)
    # 'second' comes back as 'secon' — presumably truncated to the
    # fixed-width string dtype inherited from 'first'.
    assert np.all(base.schedule.values == np.array(['first', 'secon']))
def test_from_pandas():
    """Converting a dense DataFrame preserves all columns plus the index."""
    frame = pd.DataFrame(
        {'a': [1, 2, 3, 4],
         'b': np.arange(4, dtype=np.float64),
         'c': pd.date_range('2002-01-01', periods=4, freq='M')}
    )
    table = Table.from_pandas(frame)
    assert np.all(table.a.values == frame.a.values)
    assert np.all(table.b.values == frame.b.values)
    assert np.all(table.c.values == frame.c.values)
    assert np.all(table.idx.values == frame.index.values)
def test_from_pandas_sparse():
    """NaN entries in a DataFrame become holes in the Table index."""
    frame = pd.DataFrame(
        {'a': [1, 2, 3, np.nan],
         'b': np.arange(4, dtype=np.float64),
         'c': pd.date_range('2002-01-01', periods=4, freq='M')}
    )
    table = Table.from_pandas(frame)
    holes = np.array(
        [[1, 1, 1, 1],
         [1, 1, 1, 0],
         [1, 1, 1, 1],
         [1, 1, 1, 1]], dtype=np.uint8)
    assert np.all(table.index == holes)
    assert np.all(table.a.values == np.array([1, 2, 3], dtype=np.float64))
def test_simple_rename():
    """Renaming a column updates the key roster while keeping its position."""
    table = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
    table.rename_column('a', 'c')
    assert table.keys == ['c', 'b']
| bsd-3-clause |
sysid/nbs | ml_old/Prognose/Evaluator.py | 1 | 3134 | from twBase import * # NOQA
from pandas import DataFrame
from pandas import read_csv
from pandas import datetime
from sklearn.preprocessing import MinMaxScaler
# date-time parsing function for loading the dataset
def parser(x):
    """Parse a truncated '<digit>-<month>' label into a datetime in the 1900s
    (e.g. '1-01' -> 1901-01-01)."""
    full_label = '190' + x
    return datetime.strptime(full_label, '%Y-%m')
def get_time():
    """Current UTC wall-clock time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    stamp_format = '%Y-%m-%d %H:%M:%S'
    return time.strftime(stamp_format, time.gmtime())
class DataHarness(object):
    """Prepares a load-forecast series for supervised learning: differencing,
    windowing into input/output pairs, train/test split, and [-1, 1] scaling.

    NOTE(review): `log` and `series_to_supervised` are not defined here —
    presumably they come from the `from twBase import *` at the top of the
    file; confirm.
    """
    def __init__(self, fn, isSample=False):
        # fn may be a CSV path or an already-loaded numpy array.
        if isinstance(fn, str):
            self.data = self.load(fn, isSample)
        else:
            self.data = fn
        self.differenced = self.diff(n=1, axis=0)
        self.supervised = self.series_to_supervised(n_in=3, n_out=2)
        self.train, self.test = self.split(0.8)
        # fit scaler on train data (only), then apply it to both splits
        self.scaler = MinMaxScaler(feature_range=(-1, 1))
        self.scaler = self.scaler.fit(self.train)
        log.info("Rescaling data...")
        self.train_scaled = self.scaler.transform(self.train)
        self.test_scaled = self.scaler.transform(self.test)
    def load(self, fn, isSample=False):
        '''
        Loads csv data from fn (semicolon separated, comma decimal marks),
        keeping only the 'Ist [MWh]' column.
        isSample: when True only the first 100 rows are read.
        Return: numpy array
        '''
        # load dataset
        log.info("Loading dataset", fn=fn)
        if isSample:
            log.info("Loading only sample.")
            nrows = 100
        else:
            nrows = None
        df = read_csv(fn, sep=';', decimal=',', nrows=nrows)
        cols = ['Ist [MWh]']
        data = df[cols]
        return data.values
    def diff(self, **kwargs):
        '''
        The n-th differences. The shape of the output is the same as a except along axis where the dimension is smaller by n
        Parameters analog np.diff
        '''
        log.info("Differencing...")
        return np.diff(self.data, **kwargs)
    def series_to_supervised(self, **kwargs):
        """Window the differenced series into (input, output) rows."""
        log.info("Transforming for supervised learning...")
        supervised = series_to_supervised(self.differenced, **kwargs)
        return supervised.values
    def split(self, split):
        """Split the supervised rows into train/test at fraction `split`.

        NOTE(review): the split index is computed from len(self.data) but is
        used to slice self.supervised, which is shorter after differencing
        and windowing — confirm this offset is intended.
        """
        log.info("Splitting data")
        splitIndex = int(split * len(self.data))
        return self.supervised[:splitIndex], self.supervised[splitIndex:]
    def invert_scale(self, data):
        """Map scaled values back to the original units via the fitted scaler."""
        log.info("Inverting scaling..")
        assert self.scaler is not None, "scaler scaler instantiated"
        return self.scaler.inverse_transform(data)
    def invert_diff(self, data, start_value):
        """Undo first-order differencing: prepend the seed value, take the
        cumulative sum, and drop the seed element from the result."""
        data = np.insert(data, 0, start_value, axis=0)
        result = np.cumsum(data)
        return result[1:]
def main(argv=None):
    """Smoke-test the DataHarness round trip on the 2015 dataset.

    NOTE(review): logcfg, RenderEnum, twStart and twEnd are not defined in
    this file — presumably provided by `from twBase import *`; confirm.
    Returns 0 on success.
    """
    logging.basicConfig(format="", stream=sys.stderr, level=logging.DEBUG)
    logcfg(sys.stderr, logging.DEBUG, RenderEnum.console)
    log = structlog.get_logger(__name__)
    twStart()
    DATA_FN = './data/Lastprognose/2015.csv'
    evaluator = DataHarness(DATA_FN)
    data = evaluator.invert_scale(evaluator.train)
    # Round-trip check: un-differencing the first training row must
    # reproduce the corresponding slice of the raw series.
    diffdata = evaluator.invert_diff(evaluator.train[0], evaluator.data[0])
    assert np.array_equal(diffdata, np.squeeze(evaluator.data[1:6].reshape((1,5))))
    twEnd()
    return 0  # success
# Run the round-trip smoke test when executed as a script.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| mit |
jjo31/ATHAM-Fluidity | tests/gls-Kato_Phillips-mixed_layer_depth/mixed_layer_depth_all.py | 4 | 4600 | #!/usr/bin/env python
from numpy import arange,concatenate,array,argsort
import os
import sys
import vtktools
import math
from pylab import *
from matplotlib.ticker import MaxNLocator
import re
from scipy.interpolate import UnivariateSpline
import glob
#### taken from http://www.codinghorror.com/blog/archives/001018.html #######
def sort_nicely( l ):
    """Sort the list in place in natural (human) order, so e.g. 'out2'
    precedes 'out10'. Digit runs are compared numerically, the rest as text.
    """
    def to_chunk(piece):
        return int(piece) if piece.isdigit() else piece
    def natural_key(item):
        return [to_chunk(piece) for piece in re.split('([0-9]+)', item)]
    l.sort(key=natural_key)
##############################################################################
# compute the mixed layer depth over time
def MLD(filelist):
    """Compute the mixed-layer depth over time from a list of .vtu files.

    For each file (a simulation snapshot), reads the turbulent kinetic
    energy column at x = x0 and takes the deepest point whose TKE exceeds
    the tke0 threshold as the mixed-layer depth.
    Returns (times, depths): hours since start, and MLD in metres (only
    snapshots after t=100s whose MLD changed from the previous one).
    """
    x0 = 0.
    tke0 = 1.0e-5
    last_mld = 0
    times = []
    depths = []
    for file in filelist:
        try:
            os.stat(file)
        except:
            print "No such file: %s" % file
            sys.exit(1)
        u=vtktools.vtu(file)
        time = u.GetScalarField('Time')
        tt = time[0]
        kk = u.GetScalarField('GLSTurbulentKineticEnergy')
        pos = u.GetLocations()
        # Skip early spin-up snapshots.
        if (tt < 100):
            continue
        # Collect the vertical column of TKE values at x = x0
        # (depth stored as positive-down via the sign flip on pos[i,1]).
        xyzkk = []
        for i in range(0,len(kk)):
            if( abs(pos[i,0] - x0) < 0.1 ):
                xyzkk.append((pos[i,0],-pos[i,1],pos[i,2],(kk[i])))
        xyzkkarr = vtktools.arr(xyzkk)
        III = argsort(xyzkkarr[:,1])
        xyzkkarrsort = xyzkkarr[III,:]
        # march down the column, grabbing the last value above tke0 and the first
        # one less than tke0. Interpolate between to get the MLD
        # NOTE(review): despite the comment above, no interpolation is
        # performed below — mld is simply zza; keb/zzb/kea are unused.
        kea = 1000
        keb = 0
        zza = 0
        zzb = 0
        for values in xyzkkarrsort:
            if (values[3] > tke0):
                kea = values[3]
                zza = -values[1]
            if (values[3] < tke0):
                keb = values[3]
                zzb = -values[1]
                break
        mld = zza
        # Only record snapshots where the MLD actually moved.
        if (last_mld == mld):
            continue
        times.append(tt/3600)
        depths.append(-1.0*mld)
        last_mld = mld
    return times, depths
# Plot the simulated mixed-layer depth for each GLS closure family
# (k-epsilon, k-omega, gen, k-kl) against the analytical Kato-Phillips
# solution, saving one PNG per family under the supplied path.
path = sys.argv[1]
x0 = 0.
tke0 = 1.0e-5
files_to_look_through_ke = [
    "Kato_Phillips-mld-k_e-CA",
    "Kato_Phillips-mld-k_e-CB",
    "Kato_Phillips-mld-k_e-GL",
    "Kato_Phillips-mld-k_e-KC"
    ]
files_to_look_through_gen = [
    "Kato_Phillips-mld-gen-CA",
    "Kato_Phillips-mld-gen-CB",
    "Kato_Phillips-mld-gen-GL",
    "Kato_Phillips-mld-gen-KC"
    ]
files_to_look_through_kw = [
    "Kato_Phillips-mld-k_w-CA",
    "Kato_Phillips-mld-k_w-CB",
    "Kato_Phillips-mld-k_w-GL",
    "Kato_Phillips-mld-k_w-KC"
    ]
files_to_look_through_kkl = [
    "Kato_Phillips-mld-k_kl-CA",
    "Kato_Phillips-mld-k_kl-KC"
    ]
colours = ['r','g','b','#8000FF']
# Analytical mixed-layer deepening D(t) for the Kato-Phillips set-up.
times2 = arange(0, 10, 0.1)
Dm = 1.05*1.0e-2*(1.0/sqrt(0.01))*sqrt((times2*60*60));
# --- k-epsilon family ---
figke = figure(figsize=(9.172,4.5),dpi=90)
ax = figke.add_subplot(111)
i = 0
for simulation in files_to_look_through_ke:
    filelist = glob.glob(path+simulation+"*.vtu")
    sort_nicely(filelist)
    times, depths = MLD(filelist)
    ax.plot(times,depths,colours[i],label=simulation)
    i = i+1
ax.plot(times2,Dm,'k-',label='Analytical')
# Flip the y axis so depth increases downward.
ax.set_ylim(ax.get_ylim()[::-1])
xlabel('Time (hours)')
ylabel('ML Depth (m)')
legend(loc=0)
savefig(path + '/ke.png', dpi=90,format='png')
# --- k-omega family ---
figkw = figure(figsize=(9.172,4.5),dpi=90)
ax = figkw.add_subplot(111)
i = 0
for simulation in files_to_look_through_kw:
    filelist = glob.glob(path+simulation+"*.vtu")
    sort_nicely(filelist)
    times, depths = MLD(filelist)
    ax.plot(times,depths,colours[i],label=simulation)
    i = i+1
ax.plot(times2,Dm,'k-',label='Analytical')
ax.set_ylim(ax.get_ylim()[::-1])
xlabel('Time (hours)')
ylabel('ML Depth (m)')
legend(loc=0)
savefig(path + '/kw.png', dpi=90,format='png')
# --- generic closure family ---
figgen = figure(figsize=(9.172,4.5),dpi=90)
ax = figgen.add_subplot(111)
i = 0
for simulation in files_to_look_through_gen:
    filelist = glob.glob(path+simulation+"*.vtu")
    sort_nicely(filelist)
    times, depths = MLD(filelist)
    ax.plot(times,depths,colours[i],label=simulation)
    i = i+1
ax.plot(times2,Dm,'k-',label='Analytical')
ax.set_ylim(ax.get_ylim()[::-1])
xlabel('Time (hours)')
ylabel('ML Depth (m)')
legend(loc=0)
savefig(path + '/gen.png', dpi=90,format='png')
# --- k-kl (Mellor-Yamada style) family ---
figkkl = figure(figsize=(9.172,4.5),dpi=90)
ax = figkkl.add_subplot(111)
i = 0
for simulation in files_to_look_through_kkl:
    filelist = glob.glob(path+simulation+"*.vtu")
    sort_nicely(filelist)
    times, depths = MLD(filelist)
    ax.plot(times,depths,colours[i],label=simulation)
    i = i+1
ax.plot(times2,Dm,'k-',label='Analytical')
ax.set_ylim(ax.get_ylim()[::-1])
xlabel('Time (hours)')
ylabel('ML Depth (m)')
legend(loc=0)
savefig(path + '/kkl.png', dpi=90,format='png')
| lgpl-2.1 |
OpenSourcePolicyCenter/multi-country | Python/Archive/Stage4/AuxiliaryClass.py | 2 | 116307 | from __future__ import division
import csv
import time
import numpy as np
import scipy as sp
import scipy.optimize as opt
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import AuxiliaryDemographics as demog
#from pure_cython import cy_fillca
class OLG(object):
"""
This object takes all of the parts of calculating the OG multi-country model and stores it into a centralized object. This
has a huge advantage over previous versions as we are now able to quickly access stored parts when we are trying
to expand the code. Before, we had to carefully pass tuples of parameters everywhere and it was easy to get lost in the details.
The variables are listed in alphabetical order of their data type, then alphabetical order of
of their name, so Arrays are listed first, Booleans second, etc.
For each function there are the following categories:
Description: Brief description of what the function does
Inputs: Lists the inputs that the function uses
Variables Called From Object: Lists the variables that the function calls from storage
Variables Stored in Object: Lists the variables that are put into storage
Other Functions Called: Lists the other non-library functions needed to complete the process of the current function
Objects in Function: Lists the variables that are exclusive to that function and are not used again.
Outputs: Lists the outputs that the function puts out.
"""
    def __init__(self, countries, HH_Params, Firm_Params, Lever_Params):
        """
        Description:
            -This creates the object and stores all of the parameters into the object.
            -The initialization is the starting point for model, think of this as the
             "foundation" for the object.
        Inputs:
            -self: "self" stores all of the components of the model. To access any part,
                   simply type "self.variable_name" while in the object and "objectname.variable_name"
                   outside the object. Every other object function will just take this as given, so
                   future mentions of self won't be rewritten.
            -countries    = tuple: contains a dictionary and tuple for countries and their associated
                            number, (i.e USA is country 0, EU is country 1, etc.)
            -Firm_Params  = tuple: contains alpha, annualized delta, chi, rho and g_A
            -HH_Params    = tuple: contains S, I, annualized Beta and sigma.
            -Lever_Params = tuple: contains the following boolean levers indicated by the users:
                            PrintAges, self.CheckerMode, self.Iterate, self.UseDiffDemog,
                            self.UseDiffProductivities, self.Matrix_Time, self.ShaveTime
        Variables Stored in Object:
            - self.A = Array: [I], Technology level for each country
            - self.agestopull = Array: [S], Contains which ages to be used from the data when S<80
            - self.e = Array: [I,S,T+S], Labor Productivities
            - self.e_ss = Array: [I,S], Labor productivities for the Steady State
            - self.lbar = Array: [T+S], Time endowment in each year
            - self.CheckerMode = Boolean: Used in conjunction with Checker.py, an MPI code that checks
                                 the robustness of the code. With this activated, the code only prints
                                 the statements that are necessary. This speeds up the robust check process.
            - self.Iterate = Boolean: Activates printing the iteration number and euler errors at each
                             step of the TPI process.
            - PrintAges = Boolean: Prints the ages calculated in the demographics
            - self.UseDiffDemog = Boolean: Allows each country to have different demographics
            - self.UseDiffProductivities = Boolean: Allows cohorts of different ages to produce
                                           different labor productivities
            - self.Matrix_Time = Boolean: Prints how long it takes to calculate the 2 parts of the
                                 household problem
            - self.ShaveTime = Boolean: Activates the use of the Cython module that allows the code
                               to work faster
            - self.I_dict = Dictionary: [I], Associates a country with a number
            - self.I_touse = List: [I], Roster of countries that are being used
            - self.alpha = Scalar: Capital share of production
            - self.beta = Scalar: Calculated overall future discount rate
            - self.chi = Scalar: Leisure preference parameter
            - self.delta = Scalar: Calculated overall depreciation rate
            - self.g_A = Scalar: Growth rate of technology
            - self.rho = Scalar: The intratemporal elasticity of substitution between consumption
                         and leisure
            - self.sigma = Scalar: Rate of Time Preference
            - self.FirstDyingAge = Int: First age where mortality rates effect agents
            - self.FirstFertilityAge = Int: First age where agents give birth
            - self.I = Int: Number of Countries
            - self.LastFertilityAge = Int: Last age where agents give birth
            - self.LeaveHouseAge = Int: First age where agents don't count as children in utility function
            - self.MaxImmigrantAge = Int: No immigration takes place for cohorts older than this age
            - self.S = Int: Number of Cohorts
            - self.T = Int: Number of time periods
            - self.T_1 = Int: Transition year for the demographics
            - self.Timepath_counter = Int: Counter that keeps track of the number of iterations in
                                      solving for the time paths
            - self.IterationsToShow = Set: A set of user inputs of iterations of TPI graphs to show
        Other Functions Called:
            - getkeyages = Gets the important ages for calculating demographic dynamics like
                           FirstFertilityAge, etc.
            - Import_Data = Imports the demographic data from CSV files
        Objects in Function:
            - beta_annual = Scalar: Annualized value for beta. Adjusted by S and stored as self.beta
            - delta_annual = Scalar: Annualized value for delta. Adjusted by S and stored as self.delta
        """
        #PARAMETER SET UP
        #HH Parameters
        (self.S, self.I, beta_annual, self.sigma) = HH_Params
        # Annualized rates are rescaled to the model's 70-year working span
        # divided across S cohorts.
        self.beta=beta_annual**(70/self.S)
        self.T = int(round(6*self.S))
        self.T_1 = self.S
        if self.S > 50:
            self.T_1 = 50
        #Demographics Parameters
        self.I_dict, self.I_touse = countries
        #Firm Parameters
        (self.alpha,delta_annual,self.chi,self.rho, self.g_A)= Firm_Params
        self.delta=1-(1-delta_annual)**(70/self.S)
        #Lever Parameters
        (PrintAges,self.CheckerMode,self.Iterate,self.UseDiffDemog,self.UseDiffProductivities,self.Matrix_Time,self.ShaveTime) = Lever_Params
        #Getting key ages for calculating demographic dynamics
        self.LeaveHouseAge, self.FirstFertilityAge, self.LastFertilityAge,\
                self.MaxImmigrantAge, self.FirstDyingAge, self.agestopull = demog.getkeyages(self.S,PrintAges)
        if self.UseDiffDemog:
            self.A = np.ones(self.I)+np.cumsum(np.ones(self.I)*.05)-.05 #Technological change, used for when countries are different
        else:
            self.A = np.ones(self.I) #Technological change, used for identical countries
        #Initialize Labor Productivities
        if self.UseDiffProductivities:
            self.e = np.ones((self.I, self.S, self.T+self.S))
            # The very old and the very young are less productive (0.3).
            self.e[:,self.FirstDyingAge:,:] = 0.3
            self.e[:,:self.LeaveHouseAge,:] = 0.3
        else:
            self.e = np.ones((self.I, self.S, self.T+self.S)) #Labor productivities
        self.e_ss=self.e[:,:,-1]
        #Initialize Time Endowment
        # NOTE(review): the two assignments below overwrite the cumulative-sum
        # initialization with ones everywhere — confirm whether the cumsum line
        # is intentionally dead.
        self.lbar = np.cumsum(np.ones(self.T+self.S)*self.g_A)
        self.lbar[self.T:] = np.ones(self.S)
        self.lbar[:self.T] = np.ones(self.T)
        self.lbar_ss=self.lbar[-1]
        #Imports all of the data from .CSV files needed for the model
        self.Import_Data()
        #Initialize counter that will keep track of the number of iterations the time path solver takes
        self.Timepath_counter = 1
#DEMOGRAPHICS SET-UP
    def Import_Data(self):
        """
        Description:
            - This function activates importing the .CSV files that contain our demographics data
        Variables Called from Object:
            - self.agestopull = Array: [S], Contains which ages to be used from the data when S<80
            - self.UseDiffDemog = Boolean: True activates using unique country demographic data
            - self.I = Int: Number of Countries
            - self.S = Int: Number of Cohorts
            - self.T = Int: Number of Time Periods
            - self.FirstFertilityAge = Int: First age where agents give birth
            - self.LastFertilityAge = Int: Last age where agents give birth
        Variables Stored in Object:
            - self.all_FertilityRates = Array: [I,S,f_range+T], Fertility rates from f_range years ago to year T
            - self.FertilityRates = Array: [I,S,T], Fertility rates from the present time to year T
            - self.Migrants = Array: [I,S,T], Number of immigrants
            - self.MortalityRates = Array: [I,S,T], Mortality rates of each country for each age cohort and year
            - self.N = Array: [I,S,T], Population of each country for each age cohort and year
            - self.Nhat = Array: [I,S,T], World population share of each country for each age cohort and year
        Other Functions Called:
            - None
        Objects in Function:
            - f_range = Int: Number of fertile years, used to correctly store the fertility data
            - index = Int: Unique index for a given country that corresponds to the I_dict
            - f_bar = Array: [S], Average fertility rate across countries in year T_1, used to get the SS demographics
            - rho_bar = Array: [S], Average mortality rate across countries in year T_1, used to get the SS demographics
        Outputs:
            - None
        """
        self.frange=self.LastFertilityAge+1-self.FirstFertilityAge
        self.N=np.zeros((self.I,self.S,self.T))
        self.Nhat=np.zeros((self.I,self.S,self.T))
        self.all_FertilityRates = np.zeros((self.I, self.S, self.frange+self.T))
        self.FertilityRates = np.zeros((self.I, self.S, self.T))
        self.MortalityRates = np.zeros((self.I, self.S, self.T))
        self.Migrants = np.zeros((self.I, self.S, self.T))
        # Country names sorted by their I_dict index so CSV columns line up.
        I_all = list(sorted(self.I_dict, key=self.I_dict.get))
        #We loop over each country to import its demographic data
        for i in xrange(self.I):
            #If the bool UseDiffDemog == True, we get the unique country index number for importing from the .CSVs
            if self.UseDiffDemog:
                index = self.I_dict[self.I_touse[i]]
            #Otherwise we just only use the data for one specific country
            else:
                index = 0
            #Importing the data and correctly storing it in our demographics matrices
            self.N[i,:,0] = np.loadtxt(("Data_Files/population.csv"),delimiter=',',\
                skiprows=1, usecols=[index+1])[self.agestopull]*1000
            self.all_FertilityRates[i,self.FirstFertilityAge:self.LastFertilityAge+1,\
                    :self.frange+self.T_1] = np.transpose(np.loadtxt(str("Data_Files/" + I_all[index] + "_fertility.csv"),delimiter=',',skiprows=1\
                    ,usecols=(self.agestopull[self.FirstFertilityAge:self.LastFertilityAge+1]-22))[48-self.frange:48+self.T_1,:])
            self.MortalityRates[i,self.FirstDyingAge:,:self.T_1] = np.transpose(np.loadtxt(str("Data_Files/" + I_all[index] + "_mortality.csv")\
                    ,delimiter=',',skiprows=1, usecols=(self.agestopull[self.FirstDyingAge:]-67))[:self.T_1,:])
            self.Migrants[i,:self.MaxImmigrantAge,:self.T_1] = np.einsum("s,t->st",np.loadtxt(("Data_Files/net_migration.csv"),delimiter=','\
                    ,skiprows=1, usecols=[index+1])[self.agestopull[:self.MaxImmigrantAge]]*100, np.ones(self.T_1))
        #Gets initial population share
        self.Nhat[:,:,0] = self.N[:,:,0]/np.sum(self.N[:,:,0])
        #Increases fertility rates to account for different number of periods lived
        self.all_FertilityRates = self.all_FertilityRates*80/self.S
        self.MortalityRates = self.MortalityRates*80/self.S
        #The last generation dies with probability 1
        self.MortalityRates[:,-1,:] = np.ones((self.I, self.T))
        #Gets steady-state values for all countries by taking the mean at year T_1-1 across countries
        f_bar = np.mean(self.all_FertilityRates[:,:,self.frange+self.T_1-1], axis=0)
        rho_bar = np.mean(self.MortalityRates[:,:,self.T_1-1], axis=0)
        #Set to the steady state for every year beyond year T_1
        # NOTE(review): np.expand_dims(..., axis=2) on a 1-D array relies on
        # legacy NumPy behavior (out-of-range axes were clipped); newer NumPy
        # raises AxisError here — confirm the pinned NumPy version.
        self.all_FertilityRates[:,:,self.frange+self.T_1:] = np.tile(np.expand_dims(f_bar, axis=2), (self.I,1,self.T-self.T_1))
        self.MortalityRates[:,:,self.T_1:] = np.tile(np.expand_dims(rho_bar, axis=2), (self.I,1,self.T-self.T_1))
        #FertilityRates is exactly like all_FertilityRates except it begins at time t=0 rather than time t=-self.frange
        self.FertilityRates[:,self.FirstFertilityAge:self.LastFertilityAge+1,:] = self.all_FertilityRates[:,self.FirstFertilityAge:self.LastFertilityAge+1,self.frange:]
def Demographics(self, demog_ss_tol, UseSSDemog=False):
    """
    Description:
        - This function calculates the population dynamics and steady state from the imported data by doing the following:
            1. For each year from now until year T, uses equations 3.11 and 3.12 to find the net population in a new year.
               (Note however that after year T_1 the fertility, mortality, and immigration rates are set to be the same across countries)
            2. Divides the new population by the world population to get the population share for each country and cohort
            3. While doing steps 1-2, finds the immigration rate since the data only gives us net migration
            4. After getting the population dynamics until year T, we continue to get population shares of future years beyond time T
               as explained in steps 1-2 until it converges to a steady state
            5. Stores the new steady state and non-steady state variables of population shares and mortality in the OLG object

    Inputs:
        - UseSSDemog            = Boolean: True uses the steady state demographics in calculating the transition path. Mostly used for debugging purposes
        - demog_ss_tol          = Scalar: The tolerance for the greatest absolute difference between 2 years' population shares
                                          before it is considered to be the steady state

    Variables Called from Object:
        - self.N                = Array: [I,S,T], Population of each country for each age cohort and year
        - self.Nhat             = Array: [I,S,T], World population share of each country for each age cohort and year
        - self.FertilityRates   = Array: [I,S,T], Fertility rates from the present time to year T
        - self.Migrants         = Array: [I,S,T], Number of immigrants
        - self.MortalityRates   = Array: [I,S,T], Mortality rates of each country for each age cohort and year
        - self.I                = Int: Number of Countries
        - self.S                = Int: Number of Cohorts
        - self.T                = Int: Number of Time Periods
        - self.T_1              = Int: Transition year for the demographics

    Variables Stored in Object:
        - self.ImmigrationRates = Array: [I,S,T], Immigration rates of each country for each age cohort and year
        - self.Kids             = Array: [I,S,T], Matrix that stores the per-household number of kids in each country and each time period
        - self.Kids_ss          = Array: [I,S], Steady state per-household number of kids for each country at each age
        - self.N                = Array: [I,S,T], UPDATED population of each country for each age cohort and year
        - self.Nhat             = Array: [I,S,T+S], UPDATED world population share of each country for each age cohort and year
        - self.Nhat_ss          = Array: [I,S], Population share of each country for each age cohort in the steady state
        - self.Mortality_ss     = Array: [I,S], Mortality rates of each country for each age cohort in the steady state
        - self.MortalityRates   = Array: [I,S,T+S], UPDATED mortality rates of each country for each age cohort and year

    Other Functions Called:
        - None

    Objects in Function:
        - pop_old               = Array: [I,S], Population in a given year beyond T
                                         that is compared with pop_new to determine the steady state
        - pop_new               = Array: [I,S], Population in a given year beyond T
                                         that is compared with pop_old to determine the steady state
        - kidsvec               = Array: [I,f_range], extracts each cohorts number of kids in each period
        - future_year_iter      = Int: Counter that keeps track of how many years beyond T it takes
                                       for the population shares to converge to the steady state

    Outputs:
        - None
    """
    #Initializes immigration rates
    self.ImmigrationRates = np.zeros((self.I,self.S,self.T))
    self.Kids=np.zeros((self.I,self.S,self.T))

    #Getting the population and population shares from the present to year T
    for t in xrange(1,self.T):
        #Gets new babies born this year (Equation 3.11)
        self.N[:,0,t] = np.sum((self.N[:,:,t-1]*self.FertilityRates[:,:,t-1]), axis=1)

        #Get the immigration RATES for the past year
        #If before the transition year T_1, just divide total migrants by population
        #(the *80/self.S factor rescales annual rates to the model's period length)
        if t <= self.T_1:
            self.ImmigrationRates[:,:,t-1] = self.Migrants[:,:,t-1]/self.N[:,:,t-1]*80/self.S
        #If beyond the transition year T_1, average the immigration rates in year T_1 itself
        else:
            self.ImmigrationRates[:,:,t-1] = np.mean(self.ImmigrationRates[:,:,self.T_1-1],\
                    axis=0)*80/self.S

        #Gets the non-newborn population for the next year (Equation 3.12)
        self.N[:,1:,t] = self.N[:,:-1,t-1]*(1+self.ImmigrationRates[:,:-1,t-1]-self.MortalityRates[:,:-1,t-1])

        #Gets the population share by taking a fraction of the total world population this year
        self.Nhat[:,:,t] = self.N[:,:,t]/np.sum(self.N[:,:,t])

        #Gets the number of kids each agent has in this period by summing fertility
        #along the diagonal band of ages/years during which kids still live at home
        for s in xrange(self.FirstFertilityAge,self.LastFertilityAge+self.LeaveHouseAge):
            kidsvec = np.diagonal(self.all_FertilityRates[:,s-self.LeaveHouseAge+1:s+1,t:t+self.LeaveHouseAge],axis1=1, axis2=2)
            self.Kids[:,s,t-1] = np.sum(kidsvec,axis=1)

    #Gets Immigration rates for the final year
    self.ImmigrationRates[:,:,-1] = np.mean(self.ImmigrationRates[:,:,self.T_1-1],axis=0)*80/self.S

    #Gets Kids for the final year (just the steady state)
    self.Kids[:,:,-1] = self.Kids[:,:,-2]

    #Initialize iterating variables to find the steady state population shares
    #NOTE(review): pop_old and pop_new are both *views* of self.N[:,:,-1] — they
    #alias the same memory, so the in-place updates in the loop below also mutate
    #self.N and each other. The iteration therefore advances in place; confirm
    #this aliasing is intended rather than an accidental missing .copy().
    pop_old = self.N[:,:,-1]
    pop_new = self.N[:,:,-1]
    future_year_iter = 0

    #Calculates new years of population shares until the greatest absolute difference between 2 consecutive years is less than demog_ss_tol
    #(assumes self.Nhat already holds at least 2 year-slices when entering the loop)
    while np.max(np.abs(self.Nhat[:,:,-1] - self.Nhat[:,:,-2])) > demog_ss_tol:
        pop_new[:,0] = np.sum((pop_old[:,:]*self.FertilityRates[:,:,-1]),axis=1)
        pop_new[:,1:] = pop_old[:,:-1]*(1+self.ImmigrationRates[:,:-1,-1]-self.MortalityRates[:,:-1,-1])
        self.Nhat = np.dstack((self.Nhat,pop_new/np.sum(pop_new)))
        future_year_iter += 1

    #Stores the steady state year in a seperate matrix
    self.Nhat_ss = self.Nhat[:,:,-1]
    self.Mortality_ss=self.MortalityRates[:,:,-1]
    self.Kids_ss = self.Kids[:,:,-1]

    #Deletes all the years between t=T and the steady state calculated in the while loop
    self.Nhat = self.Nhat[:,:,:self.T]

    #Imposing the ss for years after self.T
    self.Nhat = np.dstack(( self.Nhat[:,:,:self.T], np.einsum("is,t->ist",self.Nhat_ss,np.ones(self.S)) ))

    #Imposing the ss for years after self.T
    self.MortalityRates = np.dstack(( self.MortalityRates[:,:,:self.T], np.einsum("is,t->ist",self.Mortality_ss, np.ones(self.S)) ))

    #Imposing the ss for years after self.T
    self.Kids = np.dstack(( self.Kids[:,:,:self.T], np.einsum("is,t->ist",self.Kids_ss, np.ones(self.S)) ))

    #Overwrites all the years in the transition path with the steady state if UseSSDemog == True
    if UseSSDemog == True:
        self.Nhat = np.einsum("is,t->ist",self.Nhat_ss,np.ones(self.T+self.S))
        self.MortalityRates = np.einsum("is,t->ist",self.Mortality_ss,np.ones(self.T+self.S))
        self.Kids = np.einsum("is,t->ist",self.Kids_ss,np.ones(self.T+self.S))
def plotDemographics(self, T_touse="default", compare_across="T", data_year=0):
    """
    Thin wrapper that forwards the model's demographic arrays to
    AuxiliaryDemographics.plotDemographics; see that module for details.
    """
    # Bundle the age thresholds and the demographic datasets the helper expects
    age_bounds = (self.LeaveHouseAge, self.FirstFertilityAge, self.LastFertilityAge,
                  self.FirstDyingAge, self.MaxImmigrantAge)
    demo_data = (self.FertilityRates, self.MortalityRates, self.ImmigrationRates,
                 self.Nhat, self.Kids)
    demog.plotDemographics(age_bounds, demo_data, self.I, self.S, self.T,
                           self.I_touse, T_touse, compare_across, data_year)
def immigrationplot(self):
    """
    Draws one 3-D surface of immigration rates (age x time) per country,
    laid out on a grid whose shape depends on the number of countries.
    """
    # Base subplot code (rows/cols/start) keyed by country count
    layout_by_count = {2: 221, 3: 221, 4: 221, 5: 231, 6: 231, 7: 241}
    palette = ["blue", "green", "red", "cyan", "purple", "yellow", "brown"]
    fig = plt.figure()
    fig.suptitle("Immigration Rates")
    base = layout_by_count[self.I]
    for i in range(self.I):
        axis = fig.add_subplot(base + i, projection='3d')
        S_grid, T_grid = np.meshgrid(range(self.S), range(self.T))
        rates = self.ImmigrationRates[i, :, :self.T]
        axis.plot_surface(S_grid, T_grid, np.transpose(rates), color=palette[i])
        # Shared upper z-limit across panels so the surfaces are comparable
        axis.set_zlim3d(np.min(rates), np.max(self.ImmigrationRates[:, :, :self.T]) * 1.05)
        axis.set_title(self.I_touse[i])
        axis.set_xlabel('S')
        axis.set_ylabel('T')
    plt.show()
#STEADY STATE
def get_Gamma(self, w, e):
    """
    Calculates the Gamma shorthand variable (Equation 4.22).

    Inputs:
        - w = Array: [I] (steady state) or [I,T+S] (transition path), wage rates
        - e = Array: [I,S] or [I,S,T+S], labor productivities

    Variables Called From Object:
        - self.chi   = Scalar: Leisure preference parameter
        - self.rho   = Scalar: Intratemporal elasticity of substitution between consumption and leisure
        - self.sigma = Scalar: Rate of time preference

    Raises:
        - ValueError if e is neither 2- nor 3-dimensional

    Outputs:
        - Gamma = Array: same shape as e, Gamma values for each country
    """
    # Effective wage w*e; the einsum broadcasts w across the age dimension
    if e.ndim == 2:
        we = np.einsum("i,is->is", w, e)
    elif e.ndim == 3:
        we = np.einsum("it, ist -> ist", w, e)
    else:
        # Previously an unsupported ndim fell through to an UnboundLocalError
        # on `we`; fail loudly with a descriptive message instead.
        raise ValueError("e must be 2- or 3-dimensional, got ndim=%d" % e.ndim)
    Gamma = ( ( 1+self.chi*(self.chi/we)**(self.rho-1) )**((1-self.rho*self.sigma)/(self.rho-1)) ) ** (-1/self.sigma)
    return Gamma
def get_lhat(self, c, w, e):
    """
    Household leisure choice (Equation 3.20): lhat = c * (chi / (w*e)) ** rho.

    Inputs:
        - c = Array: [I,S] or [I,S,T+S], consumption
        - w = Array: [I] or [I,T+S], wage rates
        - e = Array: [I,S] or [I,S,T+S], labor productivities

    Uses self.chi (leisure preference) and self.rho (intratemporal elasticity).
    Returns leisure with the same shape as c.
    """
    # Effective wage: broadcast w over the age (and, on the path, time) axis
    if e.ndim == 2:
        effective_wage = w[:, np.newaxis] * e
    elif e.ndim == 3:
        effective_wage = w[:, np.newaxis, :] * e
    return c * (self.chi / effective_wage) ** self.rho
def get_n(self, lhat):
    """
    Aggregate labor supply (Equation 3.14): productivity-weighted labor time,
    weighted by population shares and summed over age cohorts.

    Inputs:
        - lhat = Array: [I,S] (steady state) or [I,S,T] (transition path), leisure

    Reads self.e_ss / self.lbar_ss / self.Nhat_ss in the steady-state case,
    and self.e / self.lbar / self.Nhat (truncated at self.T) on the path.
    Returns n = Array: [I] or [I,T].
    """
    if lhat.ndim == 2:
        # Steady state: productivity * time worked * population share
        n = np.sum(self.e_ss * (self.lbar_ss - lhat) * self.Nhat_ss, axis=1)
    elif lhat.ndim == 3:
        # Transition path: same weighting, year by year up to T
        time_worked = self.lbar[:self.T] - lhat
        n = np.sum(self.e[:, :, :self.T] * time_worked * self.Nhat[:, :, :self.T], axis=1)
    return n
def get_Y(self, kd, n):
    """
    Cobb-Douglas output (Equation 3.15): Y = kd**alpha * (A*n)**(1-alpha).

    Inputs:
        - kd = Array: [I] (steady state) or [I,T] (transition path), domestic capital
        - n  = Array: [I] or [I,T], aggregate labor supply

    Uses self.A (country technology levels) and self.alpha (capital share).
    Returns Y with the same shape as kd.
    """
    labor_share = 1 - self.alpha
    if kd.ndim == 1:
        # Steady state: A and n are both [I]
        Y = (kd ** self.alpha) * ((self.A * n) ** labor_share)
    elif kd.ndim == 2:
        # Transition path: broadcast each country's A across time
        Y = (kd ** self.alpha) * ((self.A[:, np.newaxis] * n) ** labor_share)
    return Y
def get_lifetime_decisionsSS(self, cK_1, w_ss, r_ss, Gamma_ss, bq_ss):
    """
    Description:
        - 1. Solves for future consumption decisions as a function of initial consumption (Equation 3.22)
        - 2. Solves for savings decisions as a function of consumption decisions and previous savings decisions (Equation 3.19)

    Inputs:
        - cK_1               = Array: [I], Kids Consumption of first cohort for each country
        - Gamma_ss           = Array: [I,S], Gamma variable, used in Equation 4.22
        - w_ss               = Array: [I], Steady state wage rate
        - r_ss               = Scalar: Steady-state interest rate
        - bq_ss              = Array: [I,S], Steady state bequests received by each cohort

    Variables Called from Object:
        - self.e_ss          = Array: [I,S], Labor productivities for the Steady State
        - self.Mortality_ss  = Array: [I,S], Mortality rates of each country for each age cohort in the steady state
        - self.I             = Int: Number of Countries
        - self.S             = Int: Number of Cohorts (assumed >= 2; see note below)
        - self.beta          = Scalar: Calculated overall future discount rate
        - self.chi           = Scalar: Leisure preference parameter
        - self.delta         = Scalar: Calculated overall depreciation rate
        - self.g_A           = Scalar: Growth rate of technology
        - self.sigma         = Scalar: Rate of Time Preference

    Outputs:
        - avec_ss            = Array: [I,S+1], Vector of steady state assets
        - cKvec_ss           = Array: [I,S], Vector of steady state kids consumption
        - cvec_ss            = Array: [I,S], Vector of steady state consumption
    """
    cKvec_ss = np.zeros((self.I,self.S))
    cvec_ss = np.zeros((self.I,self.S))
    avec_ss = np.zeros((self.I,self.S+1))

    #Kids consumption of the youngest cohort is the search variable handed in by fsolve
    cKvec_ss[:,0] = cK_1
    #Equation 4.25 at the first age: adult consumption implied by kids consumption
    cvec_ss[:,0] = cK_1/Gamma_ss[:,0]

    #NOTE(review): the loop variable s is reused after the loop for the final-age
    #asset equation (avec_ss[:,s+2]); this requires self.S >= 2 to run at all.
    #Also, (1/self.sigma) is integer division under Python 2 if sigma is an int —
    #presumably sigma is a float; confirm.
    for s in xrange(self.S-1):
        #Equation 4.26
        cKvec_ss[:,s+1] = ( ( (self.beta*(1-self.Mortality_ss[:,s])*(1+r_ss-self.delta) )**(1/self.sigma) )*cKvec_ss[:,s] )/np.exp(self.g_A)
        #Equation 4.25
        cvec_ss[:,s+1] = cKvec_ss[:,s+1]/Gamma_ss[:,s+1]
        #Equation 4.23
        avec_ss[:,s+1] = (w_ss*self.e_ss[:,s]*self.lbar_ss + (1 + r_ss - self.delta)*avec_ss[:,s] + bq_ss[:,s] \
                - cvec_ss[:,s]*(1+self.Kids_ss[:,s]*Gamma_ss[:,s]+w_ss*self.e_ss[:,s]*(self.chi/(w_ss*self.e_ss[:,s]))**self.rho))*np.exp(-self.g_A)

    #Equation 4.23 for final assets.
    #NOTE(review): unlike the loop above, this omits the self.lbar_ss factor
    #(documented elsewhere as normalized to 1.0, so likely harmless) and the
    #bequest term bq_ss[:,s+1] — confirm the missing bequest is intentional.
    avec_ss[:,s+2] = (w_ss*self.e_ss[:,s+1] + (1 + r_ss - self.delta)*avec_ss[:,s+1] - cvec_ss[:,s+1]*\
            (1+self.Kids_ss[:,s+1]*Gamma_ss[:,s+1]+w_ss*self.e_ss[:,s+1]*(self.chi/(w_ss*self.e_ss[:,s+1]))\
            **self.rho))*np.exp(-self.g_A)

    return cvec_ss, cKvec_ss, avec_ss
def GetSSComponents(self, bq_ss, r_ss, PrintSSEulErrors=False):
    """
    Description:
        - Solves for all the other steady-state variables in the model using bq_ss and r_ss

    Inputs:
        - bq_ss              = Array: [I,S], Steady state bequests received by each cohort
        - r_ss               = Scalar: Steady-state interest rate
        - PrintSSEulErrors   = Boolean: True prints the Euler equation residuals at the solution

    Variables Called from Object:
        - self.A             = Array: [I], Technology level for each country
        - self.e_ss          = Array: [I,S], Labor productivities for the Steady State
        - self.Nhat_ss       = Array: [I,S], World population share of each country for each age cohort
        - self.I             = Int: Number of Countries
        - self.alpha         = Scalar: Capital share of production

    Other Functions Called:
        - get_lhat           = Solves for leisure as in Equation 4.24
        - get_n              = Solves for labor supply as in Equation 4.17
        - get_Gamma          = Solves for the Gamma variable as in Equation 4.22
        - get_Y              = Solves for output as in Equation 4.18
        - householdEuler_SS  = System of Euler equations to solve the household problem. Used by opt.fsolve

    Outputs:
        - w_ss, cvec_ss, cKvec_ss, avec_ss, kd_ss, kf_ss, n_ss, y_ss, and lhat_ss
    """

    def householdEuler_SS(cK_1, w_ss, r_ss, Gamma_ss, bq_ss):
        """
        Residual function handed to opt.fsolve: given a guess for the youngest
        cohort's kids consumption cK_1 (Array [I]), simulates the whole
        lifetime and returns the terminal asset holdings, which must be zero
        for the household problem to be solved.

        Outputs:
            - Euler = Array: [I], Final assets for each country. Must = 0 for system to solve
        """
        cpath, cK_path, assets_path = self.get_lifetime_decisionsSS(cK_1, w_ss, r_ss, Gamma_ss, bq_ss)
        Euler = assets_path[:,-1]
        #Penalize guesses that imply negative consumption so fsolve steers away
        #from the infeasible region instead of evaluating nonsense paths
        if np.any(cpath<0):
            print "WARNING! The fsolve for initial optimal consumption guessed a negative number"
            Euler = np.ones(Euler.shape[0])*9999.
        return Euler

    def checkSSEulers(cvec_ss, cKvec_ss, avec_ss, w_ss, r_ss, bq_ss, Gamma_ss):
        """
        Diagnostic: recomputes the residuals of the four steady-state household
        conditions at the candidate solution so the caller can print whether
        each is (numerically) satisfied.

        Outputs:
            - Household_Euler             = Array: [I], terminal assets (zero-final-assets condition)
            - Chained_C_Condition         = Array: [I,S-1], Equation 4.26 residual
            - Modified_Budget_Constraint  = Array: [I,S], Equation 4.23 residual
            - Consumption_Ratio          = Array: [I,S], Equation 4.25 residual
        """
        we = np.einsum("i,is->is",w_ss,self.e_ss)

        Household_Euler = avec_ss[:,-1]
        Chained_C_Condition = cKvec_ss[:,:-1]**(-self.sigma) - \
                self.beta*(1-self.Mortality_ss[:,:-1])*(cKvec_ss[:,1:]*np.exp(self.g_A))**-self.sigma * (1+r_ss-self.delta)
        Modified_Budget_Constraint = cvec_ss -( we*self.lbar_ss + (1+r_ss-self.delta)*avec_ss[:,:-1] + bq_ss - avec_ss[:,1:]*np.exp(self.g_A) )\
                /(1+self.Kids_ss*Gamma_ss+we*(self.chi/we)**self.rho)
        Consumption_Ratio = cKvec_ss - cvec_ss*Gamma_ss

        return Household_Euler, Chained_C_Condition, Modified_Budget_Constraint, Consumption_Ratio

    #Equation 4.19: wage implied by the firm's FOC given the world interest rate
    w_ss = (self.alpha/r_ss)**(self.alpha/(1-self.alpha))*(1-self.alpha)*self.A

    #Equation 4.22
    Gamma_ss = self.get_Gamma(w_ss, self.e_ss)

    #Initial guess for the first cohort's kids consumption
    cK1_guess = np.ones(self.I)*5

    #Finds the optimal kids consumption for the first cohort
    opt_cK1 = opt.fsolve(householdEuler_SS, cK1_guess, args = (w_ss, r_ss, Gamma_ss, bq_ss))

    #Gets the optimal paths for consumption, kids consumption and assets as a function of the first cohort's consumption
    cvec_ss, cKvec_ss, avec_ss = self.get_lifetime_decisionsSS(opt_cK1, w_ss, r_ss, Gamma_ss, bq_ss)

    if PrintSSEulErrors:
        Household_Euler, Chained_C_Condition, Modified_Budget_Constraint, Consumption_Ratio = checkSSEulers(cvec_ss, cKvec_ss, avec_ss, w_ss, r_ss, bq_ss, Gamma_ss)
        print "\nZero final period assets satisfied:", np.isclose(np.max(np.absolute(Household_Euler)), 0)
        print "Equation 4.26 satisfied:", np.isclose(np.max(np.absolute(Chained_C_Condition)), 0)
        print "Equation 4.23 satisfied:", np.isclose(np.max(np.absolute(Modified_Budget_Constraint)), 0)
        print "Equation 4.25 satisfied", np.isclose(np.max(np.absolute(Consumption_Ratio)), 0)
        #print Chained_C_Condition[0,:]
        #print Modified_Budget_Constraint[0,:]

    #Snips off the final entry of assets since it is just 0 if the equations solved correctly
    avec_ss = avec_ss[:,:-1]

    #Equation 4.24
    lhat_ss = self.get_lhat(cvec_ss, w_ss, self.e_ss)

    #Equation 4.17
    n_ss = self.get_n(lhat_ss)

    #Equation 4.16
    kd_ss = np.sum(avec_ss*self.Nhat_ss,axis=1)

    #Equation 4.18
    y_ss = self.get_Y(kd_ss,n_ss)

    #Equation 4.27. Note the precedence: the (...)**(1/(1-alpha)) term is
    #multiplied by n_ss first, and kd_ss is subtracted from that product.
    kf_ss = (self.alpha*self.A/r_ss)**(1/(1-self.alpha)) * n_ss-kd_ss

    return w_ss, cvec_ss, cKvec_ss, avec_ss, kd_ss, kf_ss, n_ss, y_ss, lhat_ss
def EulerSystemSS(self, guess, PrintSSEulErrors=False):
"""
Description:
- System of Euler equations that must be satisfied (or = 0) for the ss to solve.
Inputs:
- guess = Array: [I+1], Contains guesses for individual bequests in each country
and the guess for the world intrest rate
- PrintSSEulErrors = Boolean: True prints the Euler Errors in each iteration of calculating the steady state
Variables Called from Object:
- self.Mortality_ss = Array: [I,S], Mortality rates of each country for each age cohort in the steady state
- self.Nhat_ss = Array: [I,S,T+S], World population share of each country for each age cohort and year
- self.FirstDyingAge = Int: First age where mortality rates effect agents
- self.FirstFertilityAge = Int: First age where agents give birth
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
Variables Stored in Object:
- None
Other Functions Called:
- GetSSComponents = System of equations that solves for wages, consumption, assets,
capital stocks, labor input, domestic output, and leisure in terms
of the world intrest rate and bequests
Objects in Function:
- alldeadagent_assets = Array: [I], Sum of assets of all the individuals who die in the steady state.
Evenly distributed to eligible-aged cohorts.
- avec_ss = Array: [I,S], Current guess for the ss assets holdings for each country and cohort
- bqindiv_ss = Array: [I], Current guess for the amount of bequests each eligible-aged
individual will receive in each country
- bq_ss = Array: [I,S], Vector of bequests received for each cohort and country.
Basically bqindiv_ss copied for each eligible-aged individual.
- cKvec_ss = Array: [I,S], Current guess for ss kids' consumption for each country and cohort.
- cvec_ss = Array: [I,S], Current guess for ss consumption for each country and cohort
- kd_ss = Array: [I], Current guess for ss total domestically-held capital for each country
- kf_ss = Array: [I], Current guess for ss foreign capital in each country
- lhat_ss = Array: [I,S], Current guess for ss leisure decision for each country and cohort.
- n_ss = Array: [I], Current guess for ss labor supply
- w_ss = Array: [I], Current guess for each countries ss wage rate as a function of r_ss and bqvec_ss
- y_ss = Array: [I], Current guess for ss output of each country
- r_ss = Scalar: Current guess for the steady-state intrest rate
- Euler_bq = Array: [I], Distance between bqindiv_ss and the actual bqindiv_ss calculated in the system.
Must = 0 for the ss to correctly solve.
- Euler_kf = Scalar: Sum of the foreign capital stocks. Must = 0 for the ss to correctly solve
Outputs:
- Euler_all = Array: [I+1], Euler_bq and Euler_kf stacked together. Must = 0 for the ss to correctly solve
"""
#Breaking up the input into its 2 components
bqindiv_ss = guess[:-1]
r_ss = guess[-1]
#Initializes a vector of bequests received for each individial. Will be = 0 for a block of young and a block of old cohorts
bq_ss = np.zeros((self.I,self.S))
bq_ss[:,self.FirstFertilityAge:self.FirstDyingAge] = \
np.einsum("i,s->is", bqindiv_ss, np.ones(self.FirstDyingAge-self.FirstFertilityAge))
#Calls self.GetSSComponents, which solves for all the other ss variables in terms of bequests and intrest rate
w_ss, cvec_ss, cKvec_ss, avec_ss, kd_ss, kf_ss, n_ss, y_ss, lhat_ss = self.GetSSComponents(bq_ss, r_ss, PrintSSEulErrors)
#Sum of all assets holdings of dead agents to be distributed evenly among all eligible agents
alldeadagent_assets = np.sum(avec_ss[:,self.FirstDyingAge:]*\
self.Mortality_ss[:,self.FirstDyingAge:]*self.Nhat_ss[:,self.FirstDyingAge:], axis=1)
#Equation 3.29
Euler_bq = bqindiv_ss - alldeadagent_assets/np.sum(self.Nhat_ss[:,self.FirstFertilityAge:self.FirstDyingAge],\
axis=1)
#Equation 3.24
Euler_kf = np.sum(kf_ss)
Euler_all = np.append(Euler_bq, Euler_kf)
if PrintSSEulErrors: print "Euler Errors:", Euler_all
return Euler_all
def SteadyState(self, rss_guess, bqss_guess, PrintSSEulErrors=False):
"""
Description:
- Finds the steady state of the OLG Model by doing the following:
1. Searches over values of r and bq that satisfy Equations 3.19 and 3.24
2. Uses the correct ss values of r and bq to find all the other ss variables
3. Checks to see of the system has correctly solved
Inputs:
- bqindiv_ss_guess = Array: [I], Initial guess for ss bequests that each eligible-aged individual will receive
- PrintSSEulErrors = Boolean: True prints the Euler Errors in each iteration of calculating the steady state
- rss_guess = Scalar: Initial guess for the ss intrest rate
Variables Called from Object:
- self.I = Int: Number of Countries
- self.FirstFertilityAge = Int: First age where agents give birth
- self.FirstDyingAge = Int: First age where agents begin to die
- self.S = Int: Number of Cohorts
Variables Stored in Object:
- self.avec_ss = Array: [I,S], Steady state assets
- self.bqindiv_ss = Array: [I], Bequests that each eligible-aged individual will receive in the steady state
- self.bqvec_ss = Array: [I,S], Distribution of bequests in the steady state
- self.cKvec_ss = Array: [I,S], Steady State kid's consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
- self.kd_ss = Array: [I], Steady state total domestically-owned capital holdings for each country
- self.kf_ss = Array: [I], Steady state foreign capital in each country
- self.lhat_ss = Array: [I,S], Steady state leisure decision for each country and cohort
- self.n_ss = Array: [I], Steady state aggregate labor productivity in each country
- self.Gamma_ss = Array: [I,S], Steady state value of shorthand calculation variable
- self.w_ss = Array: [I], Steady state wage rate
- self.y_ss = Array: [I], Steady state output in each country
- self.r_ss = Scalar: Steady state intrest rate
Other Functions Called:
- self.EulerSystemSS = Initiates the whole process of solving for the steady state, starting with this function
- self.GetSSComponenets = Once the bequests and interest rates are solved for, this function gives us what
the implied individual pieces would be. Then, we have those pieces stored in the object.
- self.get_Gamma = given wage and productivity paths, this function calculates the shorthand variable path.
Objects in Function:
- alldeadagent_assets = Array: [I], Sum of assets of all the individuals who die in the steady state.
Evenly distributed to eligible-aged cohorts.
- Euler_bq = Array: [I], Distance between bqindiv_ss and the actual bqindiv_ss calculated in the system.
Must = 0 for the ss to correctly solve.
- Euler_kf = Scalar: Sum of the foreign capital stocks. Must = 0 for the ss to correctly solve
Outputs:
- None
"""
#Prepares the initial guess for the fsolve
guess = np.append(bqss_guess, rss_guess)
#Searches over bq and r to find values that satisfy the Euler Equations (3.19 and 3.24)
ss = opt.fsolve(self.EulerSystemSS, guess, args=PrintSSEulErrors)
#Breaking up the output into its 2 components
self.bqindiv_ss = ss[:-1]
self.r_ss = ss[-1]
#Initializes a vector for bequests distribution. Will be = 0 for a block of young and a block of old cohorts who don't get bequests
self.bqvec_ss = np.zeros((self.I,self.S))
self.bqvec_ss[:,self.FirstFertilityAge:self.FirstDyingAge] = np.einsum("i,s->is",self.bqindiv_ss,\
np.ones(self.FirstDyingAge-self.FirstFertilityAge))
#Calls self.GetSSComponents, which solves for all the other ss variables in terms of bequests and intrest rate
self.w_ss, self.cvec_ss, self.cKvec_ss, self.avec_ss, self.kd_ss, self.kf_ss, self.n_ss, self.y_ss, self.lhat_ss \
= self.GetSSComponents(self.bqvec_ss,self.r_ss)
#Calculates and stores the steady state gamma value
self.Gamma_ss = self.get_Gamma(self.w_ss,self.e_ss)
#Sum of all assets holdings of dead agents to be distributed evenly among all eligible agents
alldeadagent_assets = np.sum(self.avec_ss[:,self.FirstDyingAge:]*self.Mortality_ss[:,self.FirstDyingAge:]*\
self.Nhat_ss[:,self.FirstDyingAge:], axis=1)
print "\n\nSTEADY STATE FOUND!"
#Checks to see if the Euler_bq and Euler_kf equations are sufficiently close to 0
if self.CheckerMode==False:
#Equation 3.29
Euler_bq = self.bqindiv_ss - alldeadagent_assets/np.sum(self.Nhat_ss[:,self.FirstFertilityAge:self.FirstDyingAge],\
axis=1)
#Equation 3.24
Euler_kf = np.sum(self.kf_ss)
print "-Euler for bq satisfied:", np.isclose(np.max(np.absolute(Euler_bq)), 0)
print "-Euler for r satisfied:", np.isclose(Euler_kf, 0), "\n\n"
def PrintSSResults(self):
"""
Description:
-Prints the final result of steady state calculations
Inputs:
- None
Variables Called from Object:
- self.avec_ss = Array: [I,S], Steady state assets
- self.cK_vec_ss = Array: [I,S], Steady state kids consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
- self.kf_ss = Array: [I], Steady state foreign capital in each country
- self.kd_ss = Array: [I], Steady state total capital holdings for each country
- self.n_ss = Array: [I], Steady state aggregate productivity in each country
- self.w_ss = Array: [I], Steady state wage rate
- self.y_ss = Array: [I], Steady state output in each country
- self.r_ss = Scalar: Steady state intrest rate
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- None
"""
print "assets steady state:", self.avec_ss
print "kf steady state", self.kf_ss
print "kd steady state", self.kd_ss
print "bq steady state", self.bqindiv_ss
print "n steady state", self.n_ss
print "y steady state", self.y_ss
print "r steady state", self.r_ss
print "w steady state", self.w_ss
print "c_vec steady state", self.cvec_ss
print "cK_vec steady state", self.cKvec_ss
def plotSSResults(self):
    """
    Plots the steady-state age profiles — consumption, kids' consumption,
    assets, leisure, and bequests — in a 2x3 subplot grid, one line per
    country in each panel.
    """
    plt.title("Steady state")
    # (subplot position, [I,S] profile, panel title) for each panel
    panels = ((231, self.cvec_ss, "Consumption"),
              (232, self.cKvec_ss, "Kids' Consumption"),
              (233, self.avec_ss, "Assets"),
              (234, self.lhat_ss, "Leisure"),
              (235, self.bqvec_ss, "Bequests"))
    for position, profile, label in panels:
        plt.subplot(position)
        for i in range(self.I):
            plt.plot(range(self.S), profile[i, :])
        plt.title(label)
    plt.show()
def plotSSUtility(self, cK_1):
    """
    Plots steady-state lifetime utility for country 0 as a 3D surface over
    a grid of initial kids' consumption levels.

    For each candidate initial kids' consumption level in cK_1, the
    household's steady-state kids' consumption, consumption, and asset
    paths are rebuilt from the Euler equation (Equation 4.26), the kids'
    consumption ratio (Equation 4.25), and the budget constraint
    (Equation 4.23). Lifetime utility is then summed across ages and
    plotted against initial consumption and initial kids' consumption.

    Inputs:
        - cK_1 = Array: [K], Grid of candidate initial (age-0) kids'
                 consumption levels

    Variables Called From Object:
        - self.S, self.Gamma_ss, self.beta, self.Mortality_ss, self.r_ss,
          self.delta, self.sigma, self.g_A, self.chi, self.w_ss,
          self.e_ss, self.rho, self.Kids_ss, self.lbar_ss, self.bqvec_ss

    Outputs:
        - None (draws a 3D matplotlib surface)
    """
    cKvec_ss = np.zeros((len(cK_1),self.S))
    cvec_ss = np.zeros((len(cK_1),self.S))
    avec_ss = np.zeros((len(cK_1),self.S+1))

    cKvec_ss[:,0] = cK_1
    cvec_ss[:,0] = cK_1/self.Gamma_ss[0,0]

    for s in xrange(self.S-1):
        #Equation 4.26. BUGFIX: this previously used self.beta**-1, which
        #contradicts the same Euler condition as implemented in the
        #transition-path code (see Chained_C_Condition); corrected to self.beta.
        cKvec_ss[:,s+1] = ( ((self.beta*(1-self.Mortality_ss[0,s])*(1+self.r_ss-self.delta))**(1/self.sigma) )*cKvec_ss[:,s] )/np.exp(self.g_A)
        #Equation 4.25
        cvec_ss[:,s+1] = cKvec_ss[:,s+1]/self.Gamma_ss[0,s+1]
        #Equation 4.23
        avec_ss[:,s+1] = (self.w_ss[0]*self.e_ss[0,s]*self.lbar_ss + (1 + self.r_ss - self.delta)*avec_ss[:,s] + self.bqvec_ss[0,s] \
                - cvec_ss[:,s]*(1+self.Kids_ss[0,s]*self.Gamma_ss[0,s]+self.w_ss[0]*self.e_ss[0,s]*(self.chi/(self.w_ss[0]*self.e_ss[0,s]))**self.rho))*np.exp(-self.g_A)

    #Equation 4.23 for final assets (s equals self.S-2 after the loop above)
    avec_ss[:,s+2] = (self.w_ss[0]*self.e_ss[0,s+1] + (1 + self.r_ss - self.delta)*avec_ss[:,s+1] - cvec_ss[:,s+1]*\
            (1+self.Kids_ss[0,s+1]*self.Gamma_ss[0,s+1]+self.w_ss[0]*self.e_ss[0,s+1]*(self.chi/(self.w_ss[0]*self.e_ss[0,s+1]))\
            **self.rho))*np.exp(-self.g_A)

    #Equation 4.24: leisure. BUGFIX: parenthesization corrected so chi is
    #divided by the full wage-productivity product w*e, matching the budget
    #constraint above (previously chi/w was multiplied by e).
    lhat_ss = cvec_ss*(self.chi/(self.w_ss[0]*self.e_ss[0,:]))**self.rho

    #Age-specific discount factors
    betaj = self.beta**np.arange(self.S)

    #Adult (V) and kids (H) components of per-age utility.
    #NOTE(review): H scales by betaj**-1 and omits the survival weight
    #(1-Mortality_ss), unlike V -- confirm this asymmetry is intentional.
    V = betaj*(1-self.sigma)**-1*(1-self.Mortality_ss[0])*\
            (cvec_ss**(1-1/self.rho) + self.chi*lhat_ss**(1-1/self.rho))**((1/self.sigma)/(1-1/self.rho))
    H = betaj**-1*(1-self.sigma)**-1*self.Kids_ss[0]*cKvec_ss**(1-self.sigma)

    #Lifetime utility for each candidate initial kids' consumption level
    U2 = np.sum(V+H, axis=1)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    c1 = cK_1/self.Gamma_ss[0,0]
    X, Y = np.meshgrid(c1, cK_1)
    Z = U2

    ax.plot_surface(X, Y, Z)
    ax.set_xlabel('Consumption')
    ax.set_ylabel('Kids Consumption')
    ax.set_zlabel('Utility')
    #plt.show()
#TIMEPATH-ITERATION
def set_initial_values(self, r_init, bq_init, a_init):
    """
    Stores the user-supplied starting guesses for the interest rate,
    bequests, and asset distribution on the object, for later use by the
    timepath-iteration routines.

    Inputs:
        - a_init  = Array: [I,S], Initial asset distribution given by User
        - bq_init = Array: [I], Initial bequests given by User
        - r_init  = Scalar: Initial interest rate given by User

    Variables Stored in Object:
        - self.a_init, self.bq_init, self.r_init (copies of the inputs)

    Outputs:
        - None
    """
    self.r_init, self.bq_init, self.a_init = r_init, bq_init, a_init
def get_initialguesses(self):
    """
    Builds initial guesses for the transition paths of the interest rate
    and of bequests, used to start the timepath iteration (TPI).

    The interest-rate guess is a quadratic y = aa*x**2 + bb*x + cc chosen
    so the path starts at r_init, reaches r_ss at t = T-1, and is flat
    (zero slope) at the endpoint. The bequest guess for each country is
    linear between bq_init and the steady-state individual bequest level.

    (A leftover debugging branch that fit a 1/t curve with opt.curve_fit
    and plotted it, but never used the result, has been removed.)

    Inputs:
        - None

    Variables Called from Object:
        - self.bq_init     = Array: [I], Initial bequests given by User
        - self.bqindiv_ss  = Array: [I], Steady state individual bequests
        - self.I           = Int: Number of Countries
        - self.T           = Int: Number of Time Periods
        - self.r_init      = Scalar: Initial interest rate given by User
        - self.r_ss        = Scalar: Steady state interest rate

    Objects in Function:
        - aa = Scalar: coefficient for x^2 term
        - bb = Scalar: coefficient for x term
        - cc = Scalar: coefficient for constant term

    Outputs:
        - bqpath_guess = Array: [I,T], Initial linear path of bequests
        - rpath_guess  = Array: [T], Initial quadratic path of interest rates
    """
    rpath_guess = np.zeros(self.T)
    bqpath_guess = np.zeros((self.I,self.T))

    #Quadratic r-path: cc anchors the start at r_init; bb and aa are chosen
    #so that the path hits r_ss at t = T-1 with zero slope there
    #(2*aa*(T-1) + bb = 0).
    cc = self.r_init
    bb = -2 * (self.r_init-self.r_ss)/(self.T-1)
    aa = -bb / (2*(self.T-1))
    rpath_guess[:self.T] = aa * np.arange(0,self.T)**2 + bb*np.arange(0,self.T) + cc

    #Linear bequest path for each country from the initial guess to the
    #steady-state level.
    for i in range(self.I):
        bqpath_guess[i,:self.T] = np.linspace(self.bq_init[i], self.bqindiv_ss[i], self.T)

    return rpath_guess, bqpath_guess
def GetTPIComponents(self, bqvec_path, r_path, Print_HH_Eulers, Print_caTimepaths):
    """
    Description:
        - Gets the transition paths for all the other variables in the model
          as a function of bqvec_path and r_path

    Inputs:
        - bqvec_path        = Array: [I,S,T+S], Transition path for distribution of bequests for each country
        - r_path            = Array: [T], Transition path for the interest rate
        - Print_caTimepaths = Boolean: True prints out the timepaths of consumption and assets. For debugging purposes
        - Print_HH_Eulers   = Boolean: True prints out if all of the household equations were satisfied or not

    Variables Called from Object:
        - None

    Variables Stored in Object:
        - None

    Other Functions Called:
        - get_c_cK_a_matrices = Gets consumption, kids consumption and assets decisions as a function of r, w, and bq
        - get_lhat  = Gets leisure as a function of c, w, and e
        - get_n     = Gets aggregate labor supply
        - get_Gamma = Application of Equation 4.22
        - get_Y     = Gets output
        - NOTE: This function also contains the nested functions get_lifetime_decisions_Future,
          get_lifetime_decisions_Alive, Alive_EulerSystem, Future_EulerSystem, and
          check_household_conditions, all of which are called in get_c_cK_a_matrices

    Objects in Function:
        - Gamma = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)

    Outputs:
        - a_matrix  = Array: [I,S,T+S], Transition path for assets holdings in each country
        - c_matrix  = Array: [I,S,T+S], Transition path for consumption in each country
        - cK_matrix = Array: [I,S,T+S], Transition path for kids consumption in each country
        - kd_path   = Array: [I,T], Transition path for total domestically-owned capital in each country
        - kf_path   = Array: [I,T], Transition path for foreign capital in each country
        - lhat_path = Array: [I,S,T+S], Transition path for leisure for each cohort and country
        - n_path    = Array: [I,T], Transition path for total labor supply in each country
        - w_path    = Array: [I,T], Transition path for the wage rate in each country
        - y_path    = Array: [I,T], Transition path for output in each country
    """

    #Functions that solve lower-diagonal household decisions in vectors
    def get_lifetime_decisions_Future(cK0, c_uppermat, cK_uppermat, a_uppermat, w_path, r_path, Gamma, bqvec_path):
        """
        Description:
            - Gets household decisions for consumption and assets for each agent to be born in the future

        Inputs:
            - a_uppermat  = Array: [I,S+1,T+S], Like c_uppermat, but for assets. Contains S+1 dimensions so
                            we can consider any leftover assets each agent has at the end of its lifetime.
            - bqvec_path  = Array: [I,S,T+S], Transition path for distribution of bequests for each country
            - cK0         = Array: [I*T], Initial kids consumption in each agent's lifetime
            - cK_uppermat = Array: [I,S,T+S], Kids consumption matrix that already contains the decisions
                            for agents currently alive and is 0 for all agents born in the future
            - c_uppermat  = Array: [I,S,T+S], Consumption matrix that already contains the decisions for
                            agents currently alive and is all 0s for agents born in the future.
                            This function fills in the rest of this matrix.
            - Gamma       = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
            - r_path      = Array: [T], Transition path for the interest rate
            - w_path      = Array: [I,T], Transition path for the wage rate in each country

        Variables Called from Object:
            - self.e, self.MortalityRates, self.I, self.S, self.T,
              self.beta, self.chi, self.delta, self.g_A, self.rho, self.sigma

        Other Functions Called:
            - cy_fillca = External cython module equivalent to the for loop below; marginally
                          faster. See pure_cython.pyx for details.

        Objects in Function:
            - we = Array: [I,S,T+S], Matrix product of w and e

        Outputs:
            - a_matrix  = Array: [I,S+1,T+S], a_uppermat filled in with assets for future cohorts
            - cK_matrix = Array: [I,S,T+S], cK_uppermat filled in with kids consumption for future cohorts
            - c_matrix  = Array: [I,S,T+S], c_uppermat filled in with consumption for future cohorts
        """
        #Initializes consumption and assets with all of the upper triangle already filled in.
        #NOTE: these are aliases, not copies -- the caller's arrays are mutated in place.
        c_matrix = c_uppermat
        cK_matrix = cK_uppermat
        a_matrix = a_uppermat
        cK_matrix[:,0,:self.T] = cK0.reshape(self.I,self.T)
        c_matrix[:,0,:self.T] = cK_matrix[:,0,:self.T]/Gamma[:,0,:self.T]

        #Gets we ahead of time for easier calculation
        we = np.einsum("it,ist->ist",w_path,self.e)

        if self.ShaveTime:
            cy_fillca(c_matrix,cK_matrix,a_matrix,r_path,self.MortalityRates,bqvec_path,we,Gamma,self.lbar,self.Kids,self.beta,self.chi,self.delta,self.g_A,self.rho,self.sigma)
        #Loops through each year (across S) and gets decisions for every agent in the next year
        else:
            for s in xrange(self.S-1):
                #Gets consumption for every agents' next year using Equation 3.22
                cK_matrix[:,s+1,s+1:self.T+s+1] = ((self.beta * (1-self.MortalityRates[:,s,s:self.T+s]) * (1 + r_path[s+1:self.T+s+1] - self.delta))**(1/self.sigma)\
                        * cK_matrix[:,s,s:self.T+s])*np.exp(-self.g_A)
                c_matrix[:,s+1,s+1:self.T+s+1] = cK_matrix[:,s+1,s+1:self.T+s+1]/Gamma[:,s+1,s+1:self.T+s+1]

                #Gets assets for every agents' next year using Equation 3.19
                a_matrix[:,s+1,s+1:self.T+s+1] = ( (we[:,s,s:self.T+s]*self.lbar[s:self.T+s] + (1 + r_path[s:self.T+s] - self.delta)*a_matrix[:,s,s:self.T+s] + bqvec_path[:,s,s:self.T+s])\
                        -c_matrix[:,s,s:self.T+s]*(1+self.Kids[:,s,s:self.T+s]*Gamma[:,s,s:self.T+s]+we[:,s,s:self.T+s]*(self.chi/we[:,s,s:self.T+s])**(self.rho)\
                        ) )*np.exp(-self.g_A)

        #Gets assets in the final period of every agents' lifetime
        s=self.S-2
        a_matrix[:,-1,s+2:self.T+s+2] = ( (we[:,-1,s+1:self.T+s+1]*self.lbar[s+1:self.T+s+1] + (1 + r_path[s+1:self.T+s+1] - self.delta)*a_matrix[:,-2,s+1:self.T+s+1])\
                -c_matrix[:,-1,s+1:self.T+s+1]*(1+self.Kids[:,-1,s+1:self.T+s+1]*Gamma[:,-1,s+1:self.T+s+1]+we[:,-1,s+1:self.T+s+1]*(self.chi/we[:,-1,s+1:self.T+s+1])**(self.rho) ) )*np.exp(-self.g_A)

        return c_matrix, cK_matrix, a_matrix

    #Functions that solve upper-diagonal household decisions in vectors
    def get_lifetime_decisions_Alive(cK0, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path):
        """
        Description:
            - Gets household decisions for consumption and assets for each cohort currently alive
              (except for the oldest cohort, whose household problem is a closed form solved in
              get_c_cK_a_matrices)

        Inputs:
            - a_matrix   = Array: [I,S+1,T+S], Matrix that gets filled in with savings decisions of each cohort currently alive
            - bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
            - cK0        = Array: [I*(S-1)], Today's kids consumption for each cohort
            - cK_matrix  = Array: [I,S,T+S], Matrix that gets filled with kids consumption decisions for each cohort currently living
            - c_matrix   = Array: [I,S,T+S], Matrix that gets filled in with consumption decisions for each cohort currently alive
            - Gamma      = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
            - r_path     = Array: [T], Transition path for the interest rate
            - w_path     = Array: [I,T], Transition path for the wage rate in each country

        Variables Called from Object:
            - self.MortalityRates, self.beta, self.chi, self.delta,
              self.g_A, self.rho, self.sigma

        Objects in Function:
            - we = Array: [I,S,T+S], Matrix product of w and e

        Outputs:
            - a_matrix  = Array: [I,S+1,T+S], Savings decisions, now including those who are alive in time 0
            - cK_matrix = Array: [I,S,T+S], Kids Consumption decisions, now including those who are alive in time 0
            - c_matrix  = Array: [I,S,T+S], Consumption decisions, now including those who are alive in time 0
        """
        cK_matrix[:,:-1,0] = cK0.reshape(self.I,self.S-1)
        c_matrix[:,:-1,0] = cK_matrix[:,:-1,0]/Gamma[:,:-1,0]

        we = np.einsum("it,ist->ist",w_path,self.e)

        #Walks up the diagonal: each pass advances all still-living cohorts one period (t == s).
        for s in xrange(self.S):
            t = s
            cK_matrix[:,s+1:,t+1] = (self.beta * (1-self.MortalityRates[:,s:-1,t]) * (1 + r_path[t+1] - self.delta))**(1/self.sigma)\
                    * cK_matrix[:,s:-1,t]*np.exp(-self.g_A)
            c_matrix[:,s+1:,t+1] = cK_matrix[:,s+1:,t+1]/Gamma[:,s+1:,t+1]

            a_matrix[:,s+1:,t+1] = ( (we[:,s:,t]*self.lbar[t] + (1 + r_path[t] - self.delta)*a_matrix[:,s:-1,t] + bqvec_path[:,s:,t])\
                    -c_matrix[:,s:,t]*(1+self.Kids[:,s:,t]*Gamma[:,s:,t]+we[:,s:,t]*(self.chi/we[:,s:,t])**(self.rho) ) )*np.exp(-self.g_A)

        #Gets assets in the final period of every agents' lifetime.
        #NOTE(review): unlike the analogous terminal-asset update in
        #get_lifetime_decisions_Future, this line omits the lbar factor on
        #we[:,-1,t+1] -- confirm whether lbar is 1 here or this is an oversight.
        a_matrix[:,-1,t+2] = ( (we[:,-1,t+1] + (1 + r_path[t+1] - self.delta)*a_matrix[:,-2,t+1])\
                -c_matrix[:,-1,t+1]*(1+self.Kids[:,-1,t+1]*Gamma[:,-1,t+1]+we[:,-1,t+1]*(self.chi/we[:,-1,t+1])**(self.rho) ) )*np.exp(-self.g_A)

        return c_matrix, cK_matrix, a_matrix

    def Alive_EulerSystem(cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path):
        """
        Description:
            - Objective function for the household decisions of agents currently alive.
              Called by the root finder; searches over levels of initial kids consumption
              that lead to the agents not having any assets when they die.

        Inputs:
            - a_matrix   = Array: [I,S+1,T+S], Savings decisions of each cohort
            - bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
            - cK0_guess  = Array: [I*(S-1)], Guess for initial kids consumption of agents currently alive
            - cK_matrix  = Array: [I,S,T+S], Kids Consumption decisions for each cohort
            - c_matrix   = Array: [I,S,T+S], Consumption decisions for each cohort
            - Gamma      = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
            - r_path     = Array: [T+S], Transition path for the interest rate
            - w_path     = Array: [I,T+S], Transition path for the wage rate in each country

        Other Functions Called:
            - get_lifetime_decisions_Alive = Gets consumption and assets decisions for agents
              currently alive as a function of consumption in the initial period (t=0).

        Outputs:
            - Euler = Array: [I*(S-1)], Remaining assets when each cohort dies off.
                      Must = 0 for the Euler system to correctly solve.
        """
        #Gets the decisions paths for each agent (mutates the matrices in place)
        c_matrix, cK_matrix, a_matrix = get_lifetime_decisions_Alive(cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path)

        #Household Eulers are solved when the agents have no assets at the end of their life
        Euler = np.ravel(a_matrix[:,-1,1:self.S])

        #print "Max Euler", max(Euler)

        return Euler

    def Future_EulerSystem(cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path):
        """
        Description:
            - Objective function for the household decisions of agents born in the future.
              Called by the root finder; searches over levels of initial kids consumption
              that lead to the agents not having any assets when they die.

        Inputs:
            - a_matrix   = Array: [I,S+1,T+S], Savings decisions of each cohort
            - bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
            - cK0_guess  = Array: [I*T], Guess for initial kids consumption of each future cohort
            - cK_matrix  = Array: [I,S,T+S], Kids Consumption decisions for each cohort
            - c_matrix   = Array: [I,S,T+S], Consumption decisions for each cohort
            - Gamma      = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
            - r_path     = Array: [T], Transition path for the interest rate
            - w_path     = Array: [I,T+S], Transition path for the wage rate in each country

        Other Functions Called:
            - get_lifetime_decisions_Future = Gets consumption and assets decisions for each agent
              to be born in the future as a function of its initial kids consumption (s=0).

        Outputs:
            - Euler = Array: [I*T], Remaining assets when each cohort dies off.
                      Must = 0 for the Euler system to correctly solve.
        """
        #Gets the decisions paths for each agent (mutates the matrices in place)
        c_matrix, cK_matrix, a_matrix = get_lifetime_decisions_Future(cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path)

        #Household Eulers are solved when the agents have no assets at the end of their life
        Euler = np.ravel(a_matrix[:,-1,self.S:])

        #print "Max Euler", max(Euler)

        return Euler

    #Checks various household conditions
    def check_household_conditions(w_path, r_path, c_matrix, cK_matrix, a_matrix, Gamma, bqvec_path):
        """
        Description:
            - Returns matrices of residuals of the left and right sides of the Household
              Euler equations to make sure the system solved correctly. Mostly used for debugging.

        Inputs:
            - a_matrix   = Array: [I,S+1,T+S], Savings decisions of each cohort
            - bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
            - cK_matrix  = Array: [I,S,T+S], Kids Consumption decisions for each cohort
            - c_matrix   = Array: [I,S,T+S], Consumption decisions for each cohort
            - Gamma      = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
            - r_path     = Array: [T], Transition path for the interest rate
            - w_path     = Array: [I,T+S], Transition path for the wage rate in each country

        Variables Called from Object:
            - self.e, self.T, self.beta, self.chi, self.delta,
              self.g_A, self.rho, self.sigma

        Objects in Function:
            - we = Array: [I,S-1,T-1], Matrix product of w and e

        Outputs:
            - Chained_C_Condition       = Array: [I,S-1,T-1], Residuals of Equation 4.26
            - Modified_Budget_Constraint= Array: [I,S-1,T-1], Residuals of Equation 4.23
            - Consumption_Ratio         = Array: [I,S,T+S], Residuals of Equation 4.25
            - Household_Euler           = Array: [I,T+S], Residuals of the 0-remaining-assets condition
        """
        #Multiplies wages and productivities ahead of time for easy calculations of the first two equations below
        we = np.einsum("it,ist->ist",w_path[:,:self.T-1],self.e[:,:-1,:self.T-1])

        #Disparity between left and right sides of Equation 4.26
        Chained_C_Condition = cK_matrix[:,:-1,:self.T-1]**(-self.sigma)\
                - self.beta*(1-self.MortalityRates[:,:-1,:self.T-1])\
                *(cK_matrix[:,1:,1:self.T]*np.exp(self.g_A))**(-self.sigma)*(1+r_path[1:self.T]-self.delta)

        #Disparity between left and right sides of Equation 4.23
        Modified_Budget_Constraint = c_matrix[:,:-1,:self.T-1]\
                - (we*self.lbar[:self.T-1] + (1+r_path[:self.T-1]-self.delta)*a_matrix[:,:-2,:self.T-1] + bqvec_path[:,:-1,:self.T-1]\
                - a_matrix[:,1:-1,1:self.T]*np.exp(self.g_A))\
                /(1 + self.Kids[:,:-1,:self.T-1]*Gamma[:,:-1,:self.T-1] + we*(self.chi/we)**(self.rho) )

        #Disparity between left and right sides of Equation 4.25
        Consumption_Ratio = cK_matrix - c_matrix*Gamma

        #Any remaining assets each agent has at the end of its lifetime. Should be 0 if other Eulers are solving correctly
        Household_Euler = a_matrix[:,-1,:]

        return Chained_C_Condition, Modified_Budget_Constraint, Consumption_Ratio, Household_Euler

    #Gets consumption and assets matrices using the root finder
    def get_c_cK_a_matrices(w_path, r_path, Gamma, bqvec_path, Print_HH_Eulers, Print_caTimepaths):
        """
        Description:
            - Solves for the optimal consumption and assets paths by searching over initial
              kids consumptions for agents alive and unborn

        Inputs:
            - bqvec_path        = Array: [I,S,T+S], Transition path for distribution of bequests for each country
            - Gamma             = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
            - r_path            = Array: [T], Transition path for the interest rate
            - w_path            = Array: [I,T], Transition path for the wage rate in each country
            - Print_caTimepaths = Boolean: True prints out the timepaths of consumption and assets. For debugging purposes.
            - Print_HH_Eulers   = Boolean: True prints out if all of the household equations were satisfied or not

        Variables Called from Object:
            - self.a_init, self.cKvec_ss, self.e, self.MortalityRates,
              self.I, self.S, self.T, self.chi, self.delta, self.rho

        Other Functions Called:
            - Alive_EulerSystem / Future_EulerSystem = Objective functions (final assets at death = 0)
            - get_lifetime_decisions_Alive / get_lifetime_decisions_Future = Lifetime decision solvers
            - check_household_conditions = Residual checks for the household conditions

        Objects in Function:
            - cK0alive_guess  = Array: [I,S-1], Initial guess for kids consumption of each agent alive
            - cK0future_guess = Array: [I,T], Initial guess for initial kids consumption of each future agent

        Outputs:
            - c_matrix[:,:,:self.T]    = Array: [I,S,T], Consumption transition path
            - cK_matrix[:,:,:self.T]   = Array: [I,S,T], Kids Consumption transition path
            - a_matrix[:,:-1,:self.T]  = Array: [I,S,T], Assets transition path
        """
        #Initializes the consumption and assets matrices
        c_matrix = np.zeros((self.I,self.S,self.T+self.S))
        cK_matrix = np.zeros((self.I,self.S,self.T+self.S))
        a_matrix = np.zeros((self.I,self.S+1,self.T+self.S))
        a_matrix[:,:-1,0] = self.a_init

        #Equation 3.19 for the oldest agent in time t=0. Note that this agent chooses to consume everything so that it has no assets in the following period
        c_matrix[:,self.S-1,0] = (w_path[:,0]*self.e[:,self.S-1,0]*self.lbar[self.S-1] + (1 + r_path[0] - self.delta)*self.a_init[:,self.S-1] + bqvec_path[:,self.S-1,0])\
                /(1+self.Kids[:,-1,0]*Gamma[:,-1,0]+w_path[:,0]*self.e[:,self.S-1,0]*(self.chi/(w_path[:,0]*self.e[:,self.S-1,0]))**(self.rho))
        cK_matrix[:,self.S-1,0] = c_matrix[:,self.S-1,0]*Gamma[:,-1,0]

        #Initial guess for agents currently alive
        cK0alive_guess = np.ones((self.I, self.S-1))*.3

        #Fills in c_matrix and a_matrix with the correct decisions for agents currently alive.
        #NOTE: the root result is deliberately discarded -- the matrices are filled in place
        #by the objective function's calls to get_lifetime_decisions_Alive.
        start=time.time()
        opt.root(Alive_EulerSystem, cK0alive_guess, args=(c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path), method="krylov", tol=1e-8)
        if self.Matrix_Time: print "\nFill time: NEW UPPER USING KRYLOV", time.time()-start

        #Initializes a guess for the first vector for the fsolve to use
        cK0future_guess = np.zeros((self.I,self.T))
        for i in range(self.I):
            cK0future_guess[i,:] = np.linspace(cK_matrix[i,1,0], self.cKvec_ss[i,-1], self.T)

        #Solves for the entire consumption and assets matrices for agents not currently born.
        #As above, the matrices are filled in place and the root result is discarded.
        start=time.time()
        opt.root(Future_EulerSystem, cK0future_guess, args=(c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path), method="krylov", tol=1e-8)
        if self.Matrix_Time: print "lower triangle fill time NOW USING KRYLOV", time.time()-start

        #Prints consumption and assets matrices for country 0.
        #NOTE: the output is the transform of the original matrices, so each row is time and each col is cohort
        if Print_caTimepaths:
            print "Consumption Matrix for country 0", str("("+self.I_touse[0]+")")
            print np.round(np.transpose(c_matrix[0,:,:self.T]), decimals=3)
            print "Assets Matrix for country 0", str("("+self.I_touse[0]+")")
            print np.round(np.transpose(a_matrix[0,:,:self.T]), decimals=3)

        #Prints if each set of conditions are satisfied or not
        if Print_HH_Eulers:
            #Gets matrices for the disparities of critical household conditions and constraints
            Chained_C_Condition, Modified_Budget_Constraint, Consumption_Ratio, Household_Euler = check_household_conditions(w_path, r_path, c_matrix, cK_matrix, a_matrix, Gamma, bqvec_path)

            #Checks to see if all of the Eulers are close enough to 0
            print "\nEuler Household satisfied:", np.isclose(np.max(np.absolute(Household_Euler)), 0), np.max(np.absolute(Household_Euler))
            print "Equation 4.26 satisfied:", np.isclose(np.max(np.absolute(Chained_C_Condition)), 0), np.max(np.absolute(Chained_C_Condition))
            print "Equation 4.23 satisfied:", np.isclose(np.max(np.absolute(Modified_Budget_Constraint)), 0), np.max(np.absolute(Modified_Budget_Constraint))
            print "Equation 4.25 satisfied", np.isclose(np.max(np.absolute(Consumption_Ratio)), 0), np.max(np.absolute(Consumption_Ratio))
            #print np.round(np.transpose(Household_Euler[0,:]), decimals=8)
            #print np.round(np.transpose(Modified_Budget_Constraint[0,:,:]), decimals=4)
            #print np.round(np.transpose(Consumption_Ratio[0,:,:]), decimals=4)

        #Returns only up until time T and not the vector
        #print c_matrix[0,:,:self.T]
        return c_matrix[:,:,:self.T], cK_matrix[:,:,:self.T], a_matrix[:,:-1,:self.T]

    #GetTPIComponents continues here

    #Equation 3.25, note that this hasn't changed from stage 3 to stage 4
    alphvec=np.ones(self.I)*self.alpha
    w_path = np.einsum("it,i->it",np.einsum("i,t->it",alphvec,1/r_path)**(self.alpha/(1-self.alpha)),(1-self.alpha)*self.A)

    #Equation 4.22
    Gamma = self.get_Gamma(w_path,self.e)

    #Equations 4.25, 4.23
    c_matrix, cK_matrix, a_matrix = get_c_cK_a_matrices(w_path, r_path, Gamma, bqvec_path, Print_HH_Eulers, Print_caTimepaths)

    #Equation 4.24
    lhat_path = self.get_lhat(c_matrix, w_path[:,:self.T], self.e[:,:,:self.T])

    #Equation 4.17
    n_path = self.get_n(lhat_path)

    #Equation 4.16
    kd_path = np.sum(a_matrix*self.Nhat[:,:,:self.T],axis=1)

    #Equation 4.18
    y_path = self.get_Y(kd_path,n_path)

    #Equation 4.28
    kf_path = np.outer(self.alpha*self.A, 1/r_path[:self.T])**( 1/(1-self.alpha) )*n_path - kd_path

    return w_path, c_matrix, cK_matrix, a_matrix, kd_path, kf_path, n_path, y_path, lhat_path
def EulerSystemTPI(self, guess, Print_HH_Eulers, Print_caTimepaths):
"""
Description:
- Gives a system of Euler equations that must be satisfied (or = 0) for the transition paths to solve.
Inputs:
- guess = Array [(I+1)*T]: Current guess for the transition paths of bq and r
- Print_caTimepaths = Boolean: True prints out the timepaths of consumption and assets. For de-bugging mostly
- Print_HH_Eulers = Boolean: True prints out if all of the household equations were satisfied or not
Variables Called from Object:
- self.MortalityRates = Array: [I,S,T+S], Mortality rates of each country for each age cohort and year
- self.Nhat = Array: [I,S,T+S], World population share of each country for each age cohort and year
- self.FirstDyingAge = Int: First age where mortality rates effect agents
- self.FirstFertilityAge = Int: First age where agents give birth
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of time periods
- self.Timepath_counter = Int: Counter that keeps track of the number of iterations in solving for the time paths
- self.IterationsToShow = Set: A set of user inputs of iterations of TPI graphs to show
Variables Stored in Object:
- None
Other Functions Called:
- self.GetTPIComponents = Gets the transition paths for all the other variables in the model as a function of bqvec_path and r_path
- self.plot_timepaths = Takes the current iteration of the timepaths and plots them into one sheet of graphs
Objects in Function:
- a_matrix = Array: [I,S,T+S], Transition path for assets holdings in each country
- alldeadagent_assets = Array: [I,T+S], Assets of all of the agents who died in each period. Used to get Euler_bq.
- bqvec_path = Array: [I,S,T], Transition path for distribution of bequests for each country
- cK_matrix = Array: [I,S,T], Transition path for Kids consumption in each country
- c_matrix = Array: [I,S,T], Transition path for consumption in each country
- Euler_bq = Array: [I,T], Euler equation that must be satisfied for the model to solve. See Equation 3.29
- Euler_kf = Array: [T], Euler equation that must be satisfied for the model to solve. See Equation 3.24
- kd_path = Array: [I,T], Transition path for total domestically-owned capital in each country
- kf_path = Array: [I,T], Transition path for foreign capital in each country
- lhat_path = Array: [I,S,T], Transition path for leisure for each cohort and country
- n_path = Array: [I,T], Transition path for total labor supply in each country
- r_path = Array: [T], Transition path for the intrest rate
- w_path = Array: [I,T], Transition path for the wage rate in each country
- y_path = Array: [I,T], Transition path for output in each country
Outputs:
- Euler_all = Array: [(I+1)*T], Euler_bq and Euler_kf combined to be the same shape as the input guess
"""
#Current guess for r and bq
guess = np.expand_dims(guess, axis=1).reshape((self.I+1,self.T))
r_path = guess[0,:]
bq_path = guess[1:,:]
#Imposes the steady state on the guesses for r and bq for S periods after T
r_path = np.hstack((r_path, np.ones(self.S)*self.r_ss))
bq_path = np.column_stack(( bq_path, np.outer(self.bqindiv_ss,np.ones(self.S)) ))
#Initilizes the bequests distribution, which essentially is a copy of bq for each eligibly-aged agent
bqvec_path = np.zeros((self.I,self.S,self.T+self.S))
bqvec_path[:,self.FirstFertilityAge:self.FirstDyingAge,:] = np.einsum("it,s->ist", bq_path, \
np.ones(self.FirstDyingAge-self.FirstFertilityAge))
#Gets all the other variables in the model as a funtion of bq and r
w_path, c_matrix, cK_matrix, a_matrix, kd_path, \
kf_path, n_path, y_path, lhat_path = self.GetTPIComponents(bqvec_path, r_path, Print_HH_Eulers, Print_caTimepaths)
#Sums up all the assets of agents that died in each period
alldeadagent_assets = np.sum(a_matrix[:,self.FirstDyingAge:,:]*\
self.MortalityRates[:,self.FirstDyingAge:,:self.T]*self.Nhat[:,self.FirstDyingAge:,:self.T], axis=1)
#Difference between assets of dead agents and our guesss for bequests. See Equation 3.29
Euler_bq = bq_path[:,:self.T] - alldeadagent_assets/np.sum(self.Nhat[:,self.FirstFertilityAge:self.FirstDyingAge,:self.T],\
axis=1)
#All the foreign-held capital must sum to 0. See Equation 3.24
Euler_kf = np.sum(kf_path,axis=0)
#Both Euler equations in one vector for the fsolve to play nice
Euler_all = np.append(Euler_bq, Euler_kf)
#Prints out info for the current iteration
if self.Iterate:
print "Iteration:", self.Timepath_counter, "Min Euler:", np.min(np.absolute(Euler_all)), "Mean Euler:", np.mean(np.absolute(Euler_all))\
, "Max Euler_bq:", np.max(np.absolute(Euler_bq)), "Max Euler_kf", np.max(np.absolute(Euler_kf))
#Will plot one of the graphs if the user specified outside the class
if self.Timepath_counter in self.IterationsToShow:
self.plot_timepaths(SAVE=False, Paths = (r_path, bq_path, w_path, c_matrix, cK_matrix, lhat_path, n_path, kd_path, kf_path))
#Keeps track of the current iteration of solving the transition path for the model
self.Timepath_counter += 1
return Euler_all
def Timepath_optimize(self, Print_HH_Eulers, Print_caTimepaths, Iters_to_show = set([])):
"""
Description:
- Solves for the transition path for each variable in the model
Inputs:
- Print_caTimepaths = Boolean: True prints out the timepaths of consumption and assets. For de-bugging mostly
- Print_HH_Eulers = Boolean: True prints out if all of the household equations were satisfied or not
- to_plot = Set: Set of integers that represent iterations of the transition path solver that the user wants plotted
Variables Called from Object:
- self.bqindiv_ss = Array: [I], Bequests each individual receives in the steady-state in each country
- self.FirstDyingAge = Int: First age where mortality rates effect agents
- self.FirstFertilityAge = Int: First age where agents give birth
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of time periods
- self.r_ss = Scalar: Steady state intrest rate
- self.IterationsToShow = Set: A set of user inputs of iterations of TPI graphs to show
Variables Stored in Object:
- self.a_matrix = Array: [I,S,T], Transition path for assets holdings for each cohort in each country
- self.bqindiv_path = Array: [I,T+S], Transition path of bq that is given to each individual
- self.bqvec_path = Array: [I,S,T], Transition path for distribution of bequests for each country
- self.cK_matrix = Array: [I,S,T], Transition path for Kids consumption for each cohort in each country
- self.c_matrix = Array: [I,S,T], Transition path for consumption for each cohort in each country
- self.kd_path = Array: [I,T], Transition path for total domestically-owned capital in each country
- self.kf_path = Array: [I,T], Transition path for foreign capital in each country
- self.lhat_path = Array: [I,S,T+S], Transition path for leisure for each cohort and country
- self.n_path = Array: [I,T], Transition path for total labor supply in each country
- self.r_path = Array: [T+S], Transition path of r from year t=0 to t=T and imposes the steady state intrest rate for S periods beyond T
- self.w_path = Array: [I,T+S], Transition path for the wage rate in each country with the Steady state imposed for an additional S periods beyond T
- self.y_path = Array: [I,T], Transition path for output in each country
Other Functions Called:
- self.get_initialguesses = Gets initial guesses for the transition paths for r and bq
- self.EulerSystemTPI = Used by opt.solve in order to search over paths for r and bq that satisfy the Euler equations for the model
- self.GetTPIComponents = Gets all the other variables in the model once we already have found the correct paths for r and bq
Objects in Function:
- bqindiv_path_guess = Array: [I,T], Initial guess for the transition path for bq
- guess = Array: [(I+1)*T], Initial guess of r and bq to feed into opt.fsolve
- paths = Array: [I+1,T], Output of opt.fsolve. Contains the correct transition paths for r and bq
- rpath_guess = Array: [T], Initial guess for the transition path for r
Outputs:
- None
"""
#This is a set that will display the plot of the transition paths for all the variables in whatever iterations are in the set
self.IterationsToShow = Iters_to_show
#Gets an initial guess for the transition paths
rpath_guess, bqindiv_path_guess = self.get_initialguesses()
#Appends the guesses to feed into the opt.fsolve
guess = np.append(rpath_guess, bqindiv_path_guess)
#Solves for the correct transition paths
paths = opt.fsolve(self.EulerSystemTPI, guess, args=(Print_HH_Eulers, Print_caTimepaths))#, method="krylov", tol=1e-8)["x"]
#Reshapes the output of the opt.fsolve so that the first row is the transition path for r and
#the second through I rows are the transition paths of bq for each country
paths = np.expand_dims(paths, axis=1).reshape((self.I+1,self.T))
#Imposes the steady state for S years beyond time T
self.r_path = np.hstack((paths[0,:], np.ones(self.S)*self.r_ss))
self.bqindiv_path = np.column_stack(( paths[1:,:], np.outer(self.bqindiv_ss,np.ones(self.S)) ))
#Initialize bequests distribution
self.bqvec_path = np.zeros((self.I,self.S,self.T+self.S))
self.bqvec_path[:,self.FirstFertilityAge:self.FirstDyingAge,:] = np.einsum("it,s->ist", self.bqindiv_path, \
np.ones(self.FirstDyingAge-self.FirstFertilityAge))
#Gets the other variables in the model
self.w_path, self.c_matrix, self.cK_matrix, self.a_matrix, self.kd_path, self.kf_path, self.n_path, self.y_path, self.lhat_path = \
self.GetTPIComponents(self.bqvec_path, self.r_path, Print_HH_Eulers, Print_caTimepaths)
def plot_timepaths(self, SAVE=False, Paths = None):
"""
Description:
- Take the timepaths and plots them into an image with windows of different graphs
Inputs:
- bq_path = Array:[I,T+S], Given bequests path
- cK_matrix = Array:[I,S,T+S], Given kids consumption matrix
- c_matrix = Array:[I,S,T+S], Given consumption matrix
- kd_path = Array:[I,T+S], Given domestic capital path
- kf_path = Array:[I,T+S], Given foreign capital path
- lhat_path = Array:[I,S,T+S], Given time endowment
- n_path = Array:[I,T+S], Given aggregate labor productivity
- r_path = Array:[T+S], Given interest rate path
- SAVE = Boolean: Switch that determines whether we save the graphs or simply show it.
Variables Called from Object:
- self.cKmatrix = Array: [I,S], Steady State kids consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
- self.kd_ss = Array: [I], Steady state total capital holdings for each country
- self.lhat_ss = Array: [I,S], Steady state leisure decision for each country and cohort
- self.n_ss = Array: [I], Steady state foreign capital in each country
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of time periods
- self.Timepath_counter = Int: Counter that keeps track of the number of iterations in solving for the time paths
- self.I_touse = List: [I], Roster of countries that are being used
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- name = String: Name of the .png file that will save the graphs.
- title = String: Overall title of the sheet of graphs
Outputs:
- None
"""
if Paths is None:
r_path, bq_path, w_path, c_matrix, cK_matrix, lhat_path, n_path, kd_path, kf_path = \
self.r_path, self.bqindiv_path, self.w_path, self.c_matrix, self.cK_matrix, self.lhat_path, self.n_path, self.kd_path, self.kf_path
else:
r_path, bq_path, w_path, c_matrix, cK_matrix, lhat_path, n_path, kd_path, kf_path = Paths
title = str("S = " + str(self.S) + ", T = " + str(self.T) + ", Iter: " + str(self.Timepath_counter))
plt.suptitle(title)
ax = plt.subplot(331)
for i in range(self.I):
plt.plot(range(self.S+self.T), r_path)
plt.title("r_path")
#plt.legend(self.I_touse)
ax.set_xticklabels([])
ax = plt.subplot(332)
for i in range(self.I):
plt.plot(range(self.S+self.T), bq_path[i,:])
plt.title("bqvec_path")
ax.set_xticklabels([])
ax = plt.subplot(333)
for i in range(self.I):
plt.plot(range(self.S+self.T), w_path[i,:])
plt.title("w_path")
ax.set_xticklabels([])
ax = plt.subplot(334)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((np.sum(c_matrix[i,:,:],axis=0),np.ones(self.S)*np.sum(self.cvec_ss[i,:]))) )
plt.title("C_path")
ax.set_xticklabels([])
ax = plt.subplot(335)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((np.sum(cK_matrix[i,:,:],axis=0),np.ones(self.S)*np.sum(self.cKvec_ss[i,:]))) )
plt.title("CK_path")
ax.set_xticklabels([])
ax = plt.subplot(336)
for i in range(self.I):
plt.plot( range(self.S+self.T), np.hstack((np.sum(lhat_path[i,:,:],axis=0),np.ones(self.S)*np.sum(self.lhat_ss[i,:]))) )
plt.title("Lhat_path")
ax.set_xticklabels([])
plt.subplot(337)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((n_path[i,:],np.ones(self.S)*self.n_ss[i])))
plt.xlabel("Year")
plt.title("n_path")
plt.subplot(338)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((kd_path[i,:],np.ones(self.S)*self.kd_ss[i])) )
plt.xlabel("Year")
plt.title("kd_path")
plt.subplot(339)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((kf_path[i,:],np.ones(self.S)*self.kf_ss[i])))
plt.xlabel("Year")
plt.title("kf_path")
if SAVE:
name= "Graphs/OLGresult_Iter"+str(self.Timepath_counter)+"_"+str(self.I)+"_"+str(self.S)+"_"+str(self.sigma)+".png"
plt.savefig(name)
plt.clf()
else:
plt.show()
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/indexes/datetimes/test_tools.py | 6 | 62998 | """ test to_datetime """
import sys
import pytest
import locale
import calendar
import numpy as np
from datetime import datetime, date, time
from distutils.version import LooseVersion
import pandas as pd
from pandas._libs import tslib, lib
from pandas.core.tools import datetimes as tools
from pandas.core.tools.datetimes import normalize_date
from pandas.compat import lmap
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.util import testing as tm
from pandas.util.testing import assert_series_equal, _skip_if_has_locale
from pandas import (isnull, to_datetime, Timestamp, Series, DataFrame,
Index, DatetimeIndex, NaT, date_range, bdate_range,
compat)
class TimeConversionFormats(object):
    """Tests for ``to_datetime`` with an explicit ``format`` string.

    NOTE(review): pytest only collects classes whose names start with
    ``Test`` under default discovery, so these tests will silently not run
    as written -- consider renaming to ``TestTimeConversionFormats``.
    """

    def test_to_datetime_format(self):
        # '%d/%m/%Y' and '%m/%d/%Y' parse the same strings into different
        # dates; exercise list, Series and scalar inputs for both formats.
        values = ['1/1/2000', '1/2/2000', '1/3/2000']
        results1 = [Timestamp('20000101'), Timestamp('20000201'),
                    Timestamp('20000301')]
        results2 = [Timestamp('20000101'), Timestamp('20000102'),
                    Timestamp('20000103')]
        for vals, expecteds in [(values, (Index(results1), Index(results2))),
                                (Series(values),
                                 (Series(results1), Series(results2))),
                                (values[0], (results1[0], results2[0])),
                                (values[1], (results1[1], results2[1])),
                                (values[2], (results1[2], results2[2]))]:
            for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']):
                result = to_datetime(vals, format=fmt)
                expected = expecteds[i]
                if isinstance(expected, Series):
                    assert_series_equal(result, Series(expected))
                elif isinstance(expected, Timestamp):
                    assert result == expected
                else:
                    tm.assert_index_equal(result, expected)

    def test_to_datetime_format_YYYYMMDD(self):
        s = Series([19801222, 19801222] + [19810105] * 5)
        expected = Series([Timestamp(x) for x in s.apply(str)])
        result = to_datetime(s, format='%Y%m%d')
        assert_series_equal(result, expected)
        result = to_datetime(s.apply(str), format='%Y%m%d')
        assert_series_equal(result, expected)
        # with NaT
        expected = Series([Timestamp("19801222"), Timestamp("19801222")] +
                          [Timestamp("19810105")] * 5)
        expected[2] = np.nan
        s[2] = np.nan
        result = to_datetime(s, format='%Y%m%d')
        assert_series_equal(result, expected)
        # string with NaT
        s = s.apply(str)
        s[2] = 'nat'
        result = to_datetime(s, format='%Y%m%d')
        assert_series_equal(result, expected)
        # coercion
        # GH 7930
        s = Series([20121231, 20141231, 99991231])
        result = pd.to_datetime(s, format='%Y%m%d', errors='ignore')
        expected = Series([datetime(2012, 12, 31),
                           datetime(2014, 12, 31), datetime(9999, 12, 31)],
                          dtype=object)
        tm.assert_series_equal(result, expected)
        result = pd.to_datetime(s, format='%Y%m%d', errors='coerce')
        expected = Series(['20121231', '20141231', 'NaT'], dtype='M8[ns]')
        assert_series_equal(result, expected)

    # GH 10178
    def test_to_datetime_format_integer(self):
        s = Series([2000, 2001, 2002])
        expected = Series([Timestamp(x) for x in s.apply(str)])
        result = to_datetime(s, format='%Y')
        assert_series_equal(result, expected)
        s = Series([200001, 200105, 200206])
        expected = Series([Timestamp(x[:4] + '-' + x[4:]) for x in s.apply(str)
                           ])
        result = to_datetime(s, format='%Y%m')
        assert_series_equal(result, expected)

    def test_to_datetime_format_microsecond(self):
        # these are locale dependent
        lang, _ = locale.getlocale()
        month_abbr = calendar.month_abbr[4]
        val = '01-{}-2011 00:00:01.978'.format(month_abbr)
        format = '%d-%b-%Y %H:%M:%S.%f'
        result = to_datetime(val, format=format)
        exp = datetime.strptime(val, format)
        assert result == exp

    def test_to_datetime_format_time(self):
        data = [
            ['01/10/2010 15:20', '%m/%d/%Y %H:%M',
             Timestamp('2010-01-10 15:20')],
            ['01/10/2010 05:43', '%m/%d/%Y %I:%M',
             Timestamp('2010-01-10 05:43')],
            ['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S',
             Timestamp('2010-01-10 13:56:01')]  # ,
            # ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',
            #  Timestamp('2010-01-10 20:14')],
            # ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',
            #  Timestamp('2010-01-10 07:40')],
            # ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',
            #  Timestamp('2010-01-10 09:12:56')]
        ]
        for s, format, dt in data:
            assert to_datetime(s, format=format) == dt

    def test_to_datetime_with_non_exact(self):
        # GH 10834
        tm._skip_if_has_locale()
        # 8904
        # exact kw
        if sys.version_info < (2, 7):
            pytest.skip('on python version < 2.7')
        s = Series(['19MAY11', 'foobar19MAY11', '19MAY11:00:00:00',
                    '19MAY11 00:00:00Z'])
        result = to_datetime(s, format='%d%b%y', exact=False)
        expected = to_datetime(s.str.extract(r'(\d+\w+\d+)', expand=False),
                               format='%d%b%y')
        assert_series_equal(result, expected)

    def test_parse_nanoseconds_with_formula(self):
        # GH8989
        # truncating the nanoseconds when a format was provided
        for v in ["2012-01-01 09:00:00.000000001",
                  "2012-01-01 09:00:00.000001",
                  "2012-01-01 09:00:00.001",
                  "2012-01-01 09:00:00.001000",
                  "2012-01-01 09:00:00.001000000", ]:
            expected = pd.to_datetime(v)
            result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f")
            assert result == expected

    def test_to_datetime_format_weeks(self):
        data = [
            ['2009324', '%Y%W%w', Timestamp('2009-08-13')],
            ['2013020', '%Y%U%w', Timestamp('2013-01-13')]
        ]
        for s, format, dt in data:
            assert to_datetime(s, format=format) == dt
class TestToDatetime(object):
    """Tests for general ``pd.to_datetime`` conversion behavior:
    datetime64 scalars/arrays, timezone handling, and invalid inputs."""

    def test_to_datetime_dt64s(self):
        in_bound_dts = [
            np.datetime64('2000-01-01'),
            np.datetime64('2000-01-02'),
        ]
        for dt in in_bound_dts:
            assert pd.to_datetime(dt) == Timestamp(dt)
        # out-of-(ns-)bounds datetimes raise, or coerce to NaT
        oob_dts = [np.datetime64('1000-01-01'), np.datetime64('5000-01-02'), ]
        for dt in oob_dts:
            pytest.raises(ValueError, pd.to_datetime, dt, errors='raise')
            pytest.raises(ValueError, Timestamp, dt)
            assert pd.to_datetime(dt, errors='coerce') is NaT

    def test_to_datetime_array_of_dt64s(self):
        dts = [np.datetime64('2000-01-01'), np.datetime64('2000-01-02'), ]
        # Assuming all datetimes are in bounds, to_datetime() returns
        # an array that is equal to Timestamp() parsing
        tm.assert_numpy_array_equal(
            pd.to_datetime(dts, box=False),
            np.array([Timestamp(x).asm8 for x in dts])
        )
        # A list of datetimes where the last one is out of bounds
        dts_with_oob = dts + [np.datetime64('9999-01-01')]
        pytest.raises(ValueError, pd.to_datetime, dts_with_oob,
                      errors='raise')
        tm.assert_numpy_array_equal(
            pd.to_datetime(dts_with_oob, box=False, errors='coerce'),
            np.array(
                [
                    Timestamp(dts_with_oob[0]).asm8,
                    Timestamp(dts_with_oob[1]).asm8,
                    tslib.iNaT,
                ],
                dtype='M8'
            )
        )
        # With errors='ignore', out of bounds datetime64s
        # are converted to their .item(), which depending on the version of
        # numpy is either a python datetime.datetime or datetime.date
        tm.assert_numpy_array_equal(
            pd.to_datetime(dts_with_oob, box=False, errors='ignore'),
            np.array(
                [dt.item() for dt in dts_with_oob],
                dtype='O'
            )
        )

    def test_to_datetime_tz(self):
        # xref 8260
        # uniform returns a DatetimeIndex
        arr = [pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
               pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')]
        result = pd.to_datetime(arr)
        expected = DatetimeIndex(
            ['2013-01-01 13:00:00', '2013-01-02 14:00:00'], tz='US/Pacific')
        tm.assert_index_equal(result, expected)
        # mixed tzs will raise
        arr = [pd.Timestamp('2013-01-01 13:00:00', tz='US/Pacific'),
               pd.Timestamp('2013-01-02 14:00:00', tz='US/Eastern')]
        pytest.raises(ValueError, lambda: pd.to_datetime(arr))

    def test_to_datetime_tz_pytz(self):
        # xref 8260
        tm._skip_if_no_pytz()
        import pytz
        us_eastern = pytz.timezone('US/Eastern')
        arr = np.array([us_eastern.localize(datetime(year=2000, month=1, day=1,
                                                     hour=3, minute=0)),
                        us_eastern.localize(datetime(year=2000, month=6, day=1,
                                                     hour=3, minute=0))],
                       dtype=object)
        result = pd.to_datetime(arr, utc=True)
        expected = DatetimeIndex(['2000-01-01 08:00:00+00:00',
                                  '2000-06-01 07:00:00+00:00'],
                                 dtype='datetime64[ns, UTC]', freq=None)
        tm.assert_index_equal(result, expected)

    def test_to_datetime_utc_is_true(self):
        # See gh-11934
        start = pd.Timestamp('2014-01-01', tz='utc')
        end = pd.Timestamp('2014-01-03', tz='utc')
        date_range = pd.bdate_range(start, end)
        result = pd.to_datetime(date_range, utc=True)
        expected = pd.DatetimeIndex(data=date_range)
        tm.assert_index_equal(result, expected)

    def test_to_datetime_tz_psycopg2(self):
        # xref 8260
        try:
            import psycopg2
        except ImportError:
            pytest.skip("no psycopg2 installed")
        # misc cases
        tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)
        tz2 = psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None)
        arr = np.array([datetime(2000, 1, 1, 3, 0, tzinfo=tz1),
                        datetime(2000, 6, 1, 3, 0, tzinfo=tz2)],
                       dtype=object)
        result = pd.to_datetime(arr, errors='coerce', utc=True)
        expected = DatetimeIndex(['2000-01-01 08:00:00+00:00',
                                  '2000-06-01 07:00:00+00:00'],
                                 dtype='datetime64[ns, UTC]', freq=None)
        tm.assert_index_equal(result, expected)
        # dtype coercion
        i = pd.DatetimeIndex([
            '2000-01-01 08:00:00+00:00'
        ], tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None))
        assert is_datetime64_ns_dtype(i)
        # tz coercion
        result = pd.to_datetime(i, errors='coerce')
        tm.assert_index_equal(result, i)
        result = pd.to_datetime(i, errors='coerce', utc=True)
        expected = pd.DatetimeIndex(['2000-01-01 13:00:00'],
                                    dtype='datetime64[ns, UTC]')
        tm.assert_index_equal(result, expected)

    def test_datetime_bool(self):
        # GH13176
        # booleans are never valid datetime input
        with pytest.raises(TypeError):
            to_datetime(False)
        assert to_datetime(False, errors="coerce") is NaT
        assert to_datetime(False, errors="ignore") is False
        with pytest.raises(TypeError):
            to_datetime(True)
        assert to_datetime(True, errors="coerce") is NaT
        assert to_datetime(True, errors="ignore") is True
        with pytest.raises(TypeError):
            to_datetime([False, datetime.today()])
        with pytest.raises(TypeError):
            to_datetime(['20130101', True])
        tm.assert_index_equal(to_datetime([0, False, NaT, 0.0],
                                          errors="coerce"),
                              DatetimeIndex([to_datetime(0), NaT,
                                             NaT, to_datetime(0)]))

    def test_datetime_invalid_datatype(self):
        # GH13176
        with pytest.raises(TypeError):
            pd.to_datetime(bool)
        with pytest.raises(TypeError):
            pd.to_datetime(pd.to_datetime)
class ToDatetimeUnit(object):
    """Tests for ``to_datetime`` with the ``unit`` keyword and for
    DataFrame/dict-of-columns input.

    NOTE(review): pytest only collects classes whose names start with
    ``Test`` under default discovery, so these tests will silently not run
    as written -- consider renaming to ``TestToDatetimeUnit``.
    """

    def test_unit(self):
        # GH 11758
        # test proper behavior with errors
        with pytest.raises(ValueError):
            to_datetime([1], unit='D', format='%Y%m%d')
        values = [11111111, 1, 1.0, tslib.iNaT, NaT, np.nan,
                  'NaT', '']
        result = to_datetime(values, unit='D', errors='ignore')
        expected = Index([11111111, Timestamp('1970-01-02'),
                          Timestamp('1970-01-02'), NaT,
                          NaT, NaT, NaT, NaT],
                         dtype=object)
        tm.assert_index_equal(result, expected)
        result = to_datetime(values, unit='D', errors='coerce')
        expected = DatetimeIndex(['NaT', '1970-01-02', '1970-01-02',
                                  'NaT', 'NaT', 'NaT', 'NaT', 'NaT'])
        tm.assert_index_equal(result, expected)
        with pytest.raises(tslib.OutOfBoundsDatetime):
            to_datetime(values, unit='D', errors='raise')
        values = [1420043460000, tslib.iNaT, NaT, np.nan, 'NaT']
        result = to_datetime(values, errors='ignore', unit='s')
        expected = Index([1420043460000, NaT, NaT,
                          NaT, NaT], dtype=object)
        tm.assert_index_equal(result, expected)
        result = to_datetime(values, errors='coerce', unit='s')
        expected = DatetimeIndex(['NaT', 'NaT', 'NaT', 'NaT', 'NaT'])
        tm.assert_index_equal(result, expected)
        with pytest.raises(tslib.OutOfBoundsDatetime):
            to_datetime(values, errors='raise', unit='s')
        # if we have a string, then we raise a ValueError
        # and NOT an OutOfBoundsDatetime
        for val in ['foo', Timestamp('20130101')]:
            try:
                to_datetime(val, errors='raise', unit='s')
            except tslib.OutOfBoundsDatetime:
                raise AssertionError("incorrect exception raised")
            except ValueError:
                pass

    def test_unit_consistency(self):
        # consistency of conversions
        expected = Timestamp('1970-05-09 14:25:11')
        result = pd.to_datetime(11111111, unit='s', errors='raise')
        assert result == expected
        assert isinstance(result, Timestamp)
        result = pd.to_datetime(11111111, unit='s', errors='coerce')
        assert result == expected
        assert isinstance(result, Timestamp)
        result = pd.to_datetime(11111111, unit='s', errors='ignore')
        assert result == expected
        assert isinstance(result, Timestamp)

    def test_unit_with_numeric(self):
        # GH 13180
        # coercions from floats/ints are ok
        expected = DatetimeIndex(['2015-06-19 05:33:20',
                                  '2015-05-27 22:33:20'])
        arr1 = [1.434692e+18, 1.432766e+18]
        arr2 = np.array(arr1).astype('int64')
        for errors in ['ignore', 'raise', 'coerce']:
            result = pd.to_datetime(arr1, errors=errors)
            tm.assert_index_equal(result, expected)
            result = pd.to_datetime(arr2, errors=errors)
            tm.assert_index_equal(result, expected)
        # but we want to make sure that we are coercing
        # if we have ints/strings
        expected = DatetimeIndex(['NaT',
                                  '2015-06-19 05:33:20',
                                  '2015-05-27 22:33:20'])
        arr = ['foo', 1.434692e+18, 1.432766e+18]
        result = pd.to_datetime(arr, errors='coerce')
        tm.assert_index_equal(result, expected)
        expected = DatetimeIndex(['2015-06-19 05:33:20',
                                  '2015-05-27 22:33:20',
                                  'NaT',
                                  'NaT'])
        arr = [1.434692e+18, 1.432766e+18, 'foo', 'NaT']
        result = pd.to_datetime(arr, errors='coerce')
        tm.assert_index_equal(result, expected)

    def test_unit_mixed(self):
        # mixed integers/datetimes
        expected = DatetimeIndex(['2013-01-01', 'NaT', 'NaT'])
        arr = [pd.Timestamp('20130101'), 1.434692e+18, 1.432766e+18]
        result = pd.to_datetime(arr, errors='coerce')
        tm.assert_index_equal(result, expected)
        with pytest.raises(ValueError):
            pd.to_datetime(arr, errors='raise')
        expected = DatetimeIndex(['NaT',
                                  'NaT',
                                  '2013-01-01'])
        arr = [1.434692e+18, 1.432766e+18, pd.Timestamp('20130101')]
        result = pd.to_datetime(arr, errors='coerce')
        tm.assert_index_equal(result, expected)
        with pytest.raises(ValueError):
            pd.to_datetime(arr, errors='raise')

    def test_dataframe(self):
        # assembling datetimes from a DataFrame of component columns
        df = DataFrame({'year': [2015, 2016],
                        'month': [2, 3],
                        'day': [4, 5],
                        'hour': [6, 7],
                        'minute': [58, 59],
                        'second': [10, 11],
                        'ms': [1, 1],
                        'us': [2, 2],
                        'ns': [3, 3]})
        result = to_datetime({'year': df['year'],
                              'month': df['month'],
                              'day': df['day']})
        expected = Series([Timestamp('20150204 00:00:00'),
                           Timestamp('20160305 00:0:00')])
        assert_series_equal(result, expected)
        # dict-like
        result = to_datetime(df[['year', 'month', 'day']].to_dict())
        assert_series_equal(result, expected)
        # dict but with constructable
        df2 = df[['year', 'month', 'day']].to_dict()
        df2['month'] = 2
        result = to_datetime(df2)
        expected2 = Series([Timestamp('20150204 00:00:00'),
                            Timestamp('20160205 00:0:00')])
        assert_series_equal(result, expected2)
        # unit mappings: both plural and singular column aliases are accepted
        units = [{'year': 'years',
                  'month': 'months',
                  'day': 'days',
                  'hour': 'hours',
                  'minute': 'minutes',
                  'second': 'seconds'},
                 {'year': 'year',
                  'month': 'month',
                  'day': 'day',
                  'hour': 'hour',
                  'minute': 'minute',
                  'second': 'second'},
                 ]
        for d in units:
            result = to_datetime(df[list(d.keys())].rename(columns=d))
            expected = Series([Timestamp('20150204 06:58:10'),
                               Timestamp('20160305 07:59:11')])
            assert_series_equal(result, expected)
        d = {'year': 'year',
             'month': 'month',
             'day': 'day',
             'hour': 'hour',
             'minute': 'minute',
             'second': 'second',
             'ms': 'ms',
             'us': 'us',
             'ns': 'ns'}
        result = to_datetime(df.rename(columns=d))
        expected = Series([Timestamp('20150204 06:58:10.001002003'),
                           Timestamp('20160305 07:59:11.001002003')])
        assert_series_equal(result, expected)
        # coerce back to int
        result = to_datetime(df.astype(str))
        assert_series_equal(result, expected)
        # passing coerce
        df2 = DataFrame({'year': [2015, 2016],
                         'month': [2, 20],
                         'day': [4, 5]})
        with pytest.raises(ValueError):
            to_datetime(df2)
        result = to_datetime(df2, errors='coerce')
        expected = Series([Timestamp('20150204 00:00:00'),
                           NaT])
        assert_series_equal(result, expected)
        # extra columns
        with pytest.raises(ValueError):
            df2 = df.copy()
            df2['foo'] = 1
            to_datetime(df2)
        # not enough
        for c in [['year'],
                  ['year', 'month'],
                  ['year', 'month', 'second'],
                  ['month', 'day'],
                  ['year', 'day', 'second']]:
            with pytest.raises(ValueError):
                to_datetime(df[c])
        # duplicates
        df2 = DataFrame({'year': [2015, 2016],
                         'month': [2, 20],
                         'day': [4, 5]})
        df2.columns = ['year', 'year', 'day']
        with pytest.raises(ValueError):
            to_datetime(df2)
        df2 = DataFrame({'year': [2015, 2016],
                         'month': [2, 20],
                         'day': [4, 5],
                         'hour': [4, 5]})
        df2.columns = ['year', 'month', 'day', 'day']
        with pytest.raises(ValueError):
            to_datetime(df2)

    def test_dataframe_dtypes(self):
        # #13451
        df = DataFrame({'year': [2015, 2016],
                        'month': [2, 3],
                        'day': [4, 5]})
        # int16
        result = to_datetime(df.astype('int16'))
        expected = Series([Timestamp('20150204 00:00:00'),
                           Timestamp('20160305 00:00:00')])
        assert_series_equal(result, expected)
        # mixed dtypes
        df['month'] = df['month'].astype('int8')
        df['day'] = df['day'].astype('int8')
        result = to_datetime(df)
        expected = Series([Timestamp('20150204 00:00:00'),
                           Timestamp('20160305 00:00:00')])
        assert_series_equal(result, expected)
        # float
        df = DataFrame({'year': [2000, 2001],
                        'month': [1.5, 1],
                        'day': [1, 1]})
        with pytest.raises(ValueError):
            to_datetime(df)
class ToDatetimeMisc(object):
    """Miscellaneous ``to_datetime`` tests: ISO 8601 parsing, NaN/NaT
    handling, dayfirst, and conversions from various input types.

    NOTE(review): pytest only collects classes whose names start with
    ``Test`` under default discovery, so these tests will silently not run
    as written -- consider renaming to ``TestToDatetimeMisc``.
    """

    def test_index_to_datetime(self):
        idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
        # Index.to_datetime is deprecated and warns
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = idx.to_datetime()
            expected = DatetimeIndex(pd.to_datetime(idx.values))
            tm.assert_index_equal(result, expected)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            today = datetime.today()
            idx = Index([today], dtype=object)
            result = idx.to_datetime()
            expected = DatetimeIndex([today])
            tm.assert_index_equal(result, expected)

    def test_to_datetime_iso8601(self):
        result = to_datetime(["2012-01-01 00:00:00"])
        exp = Timestamp("2012-01-01 00:00:00")
        assert result[0] == exp
        result = to_datetime(['20121001'])  # bad iso 8601
        exp = Timestamp('2012-10-01')
        assert result[0] == exp

    def test_to_datetime_default(self):
        rs = to_datetime('2001')
        xp = datetime(2001, 1, 1)
        assert rs, xp
        # dayfirst is essentially broken
        # to_datetime('01-13-2012', dayfirst=True)
        # pytest.raises(ValueError, to_datetime('01-13-2012',
        #                                       dayfirst=True))

    def test_to_datetime_on_datetime64_series(self):
        # #2699
        s = Series(date_range('1/1/2000', periods=10))
        result = to_datetime(s)
        assert result[0] == s[0]

    def test_to_datetime_with_space_in_series(self):
        # GH 6428
        s = Series(['10/18/2006', '10/18/2008', ' '])
        pytest.raises(ValueError, lambda: to_datetime(s, errors='raise'))
        result_coerce = to_datetime(s, errors='coerce')
        expected_coerce = Series([datetime(2006, 10, 18),
                                  datetime(2008, 10, 18),
                                  NaT])
        tm.assert_series_equal(result_coerce, expected_coerce)
        result_ignore = to_datetime(s, errors='ignore')
        tm.assert_series_equal(result_ignore, s)

    def test_to_datetime_with_apply(self):
        # this is only locale tested with US/None locales
        tm._skip_if_has_locale()
        # GH 5195
        # with a format and coerce a single item to_datetime fails
        td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1, 2, 3])
        expected = pd.to_datetime(td, format='%b %y')
        result = td.apply(pd.to_datetime, format='%b %y')
        assert_series_equal(result, expected)
        td = pd.Series(['May 04', 'Jun 02', ''], index=[1, 2, 3])
        pytest.raises(ValueError,
                      lambda: pd.to_datetime(td, format='%b %y',
                                             errors='raise'))
        pytest.raises(ValueError,
                      lambda: td.apply(pd.to_datetime, format='%b %y',
                                       errors='raise'))
        expected = pd.to_datetime(td, format='%b %y', errors='coerce')
        result = td.apply(
            lambda x: pd.to_datetime(x, format='%b %y', errors='coerce'))
        assert_series_equal(result, expected)

    def test_to_datetime_types(self):
        # empty string
        result = to_datetime('')
        assert result is NaT
        result = to_datetime(['', ''])
        assert isnull(result).all()
        # ints
        result = Timestamp(0)
        expected = to_datetime(0)
        assert result == expected
        # GH 3888 (strings)
        expected = to_datetime(['2012'])[0]
        result = to_datetime('2012')
        assert result == expected
        # array = ['2012','20120101','20120101 12:01:01']
        array = ['20120101', '20120101 12:01:01']
        expected = list(to_datetime(array))
        result = lmap(Timestamp, array)
        tm.assert_almost_equal(result, expected)
        # currently fails ###
        # result = Timestamp('2012')
        # expected = to_datetime('2012')
        # assert result == expected

    def test_to_datetime_unprocessable_input(self):
        # GH 4928
        tm.assert_numpy_array_equal(
            to_datetime([1, '1'], errors='ignore'),
            np.array([1, '1'], dtype='O')
        )
        pytest.raises(TypeError, to_datetime, [1, '1'], errors='raise')

    def test_to_datetime_other_datetime64_units(self):
        # 5/25/2012
        scalar = np.int64(1337904000000000).view('M8[us]')
        as_obj = scalar.astype('O')
        index = DatetimeIndex([scalar])
        assert index[0] == scalar.astype('O')
        value = Timestamp(scalar)
        assert value == as_obj

    def test_to_datetime_list_of_integers(self):
        rng = date_range('1/1/2000', periods=20)
        rng = DatetimeIndex(rng.values)
        ints = list(rng.asi8)
        result = DatetimeIndex(ints)
        tm.assert_index_equal(rng, result)

    def test_to_datetime_freq(self):
        xp = bdate_range('2000-1-1', periods=10, tz='UTC')
        rs = xp.to_datetime()
        assert xp.freq == rs.freq
        assert xp.tzinfo == rs.tzinfo

    def test_string_na_nat_conversion(self):
        # GH #999, #858
        from pandas.compat import parse_date
        strings = np.array(['1/1/2000', '1/2/2000', np.nan,
                            '1/4/2000, 12:34:56'], dtype=object)
        expected = np.empty(4, dtype='M8[ns]')
        for i, val in enumerate(strings):
            if isnull(val):
                expected[i] = tslib.iNaT
            else:
                expected[i] = parse_date(val)
        result = tslib.array_to_datetime(strings)
        tm.assert_almost_equal(result, expected)
        result2 = to_datetime(strings)
        assert isinstance(result2, DatetimeIndex)
        tm.assert_numpy_array_equal(result, result2.values)
        malformed = np.array(['1/100/2000', np.nan], dtype=object)
        # GH 10636, default is now 'raise'
        pytest.raises(ValueError,
                      lambda: to_datetime(malformed, errors='raise'))
        result = to_datetime(malformed, errors='ignore')
        tm.assert_numpy_array_equal(result, malformed)
        pytest.raises(ValueError, to_datetime, malformed, errors='raise')
        idx = ['a', 'b', 'c', 'd', 'e']
        series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
                         '1/5/2000'], index=idx, name='foo')
        dseries = Series([to_datetime('1/1/2000'), np.nan,
                          to_datetime('1/3/2000'), np.nan,
                          to_datetime('1/5/2000')], index=idx, name='foo')
        result = to_datetime(series)
        dresult = to_datetime(dseries)
        expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
        for i in range(5):
            x = series[i]
            if isnull(x):
                expected[i] = tslib.iNaT
            else:
                expected[i] = to_datetime(x)
        assert_series_equal(result, expected, check_names=False)
        assert result.name == 'foo'
        assert_series_equal(dresult, expected, check_names=False)
        assert dresult.name == 'foo'

    def test_dti_constructor_numpy_timeunits(self):
        # GH 9114
        base = pd.to_datetime(['2000-01-01T00:00', '2000-01-02T00:00', 'NaT'])
        for dtype in ['datetime64[h]', 'datetime64[m]', 'datetime64[s]',
                      'datetime64[ms]', 'datetime64[us]', 'datetime64[ns]']:
            values = base.values.astype(dtype)
            tm.assert_index_equal(DatetimeIndex(values), base)
            tm.assert_index_equal(to_datetime(values), base)

    def test_dayfirst(self):
        # GH 5917
        arr = ['10/02/2014', '11/02/2014', '12/02/2014']
        expected = DatetimeIndex([datetime(2014, 2, 10), datetime(2014, 2, 11),
                                  datetime(2014, 2, 12)])
        idx1 = DatetimeIndex(arr, dayfirst=True)
        idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
        idx3 = to_datetime(arr, dayfirst=True)
        idx4 = to_datetime(np.array(arr), dayfirst=True)
        idx5 = DatetimeIndex(Index(arr), dayfirst=True)
        idx6 = DatetimeIndex(Series(arr), dayfirst=True)
        tm.assert_index_equal(expected, idx1)
        tm.assert_index_equal(expected, idx2)
        tm.assert_index_equal(expected, idx3)
        tm.assert_index_equal(expected, idx4)
        tm.assert_index_equal(expected, idx5)
        tm.assert_index_equal(expected, idx6)
class TestGuessDatetimeFormat(object):
    """Tests for tools._guess_datetime_format / _guess_datetime_format_for_array."""
    def test_guess_datetime_format_with_parseable_formats(self):
        """Common unambiguous strings map to their strftime format."""
        tm._skip_if_not_us_locale()
        dt_string_to_format = (('20111230', '%Y%m%d'),
                               ('2011-12-30', '%Y-%m-%d'),
                               ('30-12-2011', '%d-%m-%Y'),
                               ('2011-12-30 00:00:00', '%Y-%m-%d %H:%M:%S'),
                               ('2011-12-30T00:00:00', '%Y-%m-%dT%H:%M:%S'),
                               ('2011-12-30 00:00:00.000000',
                                '%Y-%m-%d %H:%M:%S.%f'), )
        for dt_string, dt_format in dt_string_to_format:
            assert tools._guess_datetime_format(dt_string) == dt_format
    def test_guess_datetime_format_with_dayfirst(self):
        """dayfirst controls how an ambiguous dd/mm vs mm/dd string is read."""
        ambiguous_string = '01/01/2011'
        assert tools._guess_datetime_format(
            ambiguous_string, dayfirst=True) == '%d/%m/%Y'
        assert tools._guess_datetime_format(
            ambiguous_string, dayfirst=False) == '%m/%d/%Y'
    def test_guess_datetime_format_with_locale_specific_formats(self):
        # The month names will vary depending on the locale, in which
        # case these wont be parsed properly (dateutil can't parse them)
        tm._skip_if_has_locale()
        dt_string_to_format = (('30/Dec/2011', '%d/%b/%Y'),
                               ('30/December/2011', '%d/%B/%Y'),
                               ('30/Dec/2011 00:00:00', '%d/%b/%Y %H:%M:%S'), )
        for dt_string, dt_format in dt_string_to_format:
            assert tools._guess_datetime_format(dt_string) == dt_format
    def test_guess_datetime_format_invalid_inputs(self):
        # A datetime string must include a year, month and a day for it
        # to be guessable, in addition to being a string that looks like
        # a datetime
        invalid_dts = [
            '2013',
            '01/2013',
            '12:00:00',
            '1/1/1/1',
            'this_is_not_a_datetime',
            '51a',
            9,
            datetime(2011, 1, 1),
        ]
        # non-strings (int, datetime) must also return None, not raise
        for invalid_dt in invalid_dts:
            assert tools._guess_datetime_format(invalid_dt) is None
    def test_guess_datetime_format_nopadding(self):
        # GH 11142: single-digit components still map to the padded
        # strftime directives
        dt_string_to_format = (('2011-1-1', '%Y-%m-%d'),
                               ('30-1-2011', '%d-%m-%Y'),
                               ('1/1/2011', '%m/%d/%Y'),
                               ('2011-1-1 00:00:00', '%Y-%m-%d %H:%M:%S'),
                               ('2011-1-1 0:0:0', '%Y-%m-%d %H:%M:%S'),
                               ('2011-1-3T00:00:0', '%Y-%m-%dT%H:%M:%S'))
        for dt_string, dt_format in dt_string_to_format:
            assert tools._guess_datetime_format(dt_string) == dt_format
    def test_guess_datetime_format_for_array(self):
        """The array variant uses the first non-NaN element; all-NaN -> None."""
        tm._skip_if_not_us_locale()
        expected_format = '%Y-%m-%d %H:%M:%S.%f'
        dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
        test_arrays = [
            np.array([dt_string, dt_string, dt_string], dtype='O'),
            np.array([np.nan, np.nan, dt_string], dtype='O'),
            np.array([dt_string, 'random_string'], dtype='O'),
        ]
        for test_array in test_arrays:
            assert tools._guess_datetime_format_for_array(
                test_array) == expected_format
        format_for_string_of_nans = tools._guess_datetime_format_for_array(
            np.array(
                [np.nan, np.nan, np.nan], dtype='O'))
        assert format_for_string_of_nans is None
class TestToDatetimeInferFormat(object):
    """to_datetime(..., infer_datetime_format=True) must never change results."""
    def test_to_datetime_infer_datetime_format_consistent_format(self):
        """Uniformly formatted strings parse the same with/without inference."""
        s = pd.Series(pd.date_range('20000101', periods=50, freq='H'))
        test_formats = ['%m-%d-%Y', '%m/%d/%Y %H:%M:%S.%f',
                        '%Y-%m-%dT%H:%M:%S.%f']
        for test_format in test_formats:
            s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))
            with_format = pd.to_datetime(s_as_dt_strings, format=test_format)
            no_infer = pd.to_datetime(s_as_dt_strings,
                                      infer_datetime_format=False)
            yes_infer = pd.to_datetime(s_as_dt_strings,
                                       infer_datetime_format=True)
            # Whether the format is explicitly passed, it is inferred, or
            # it is not inferred, the results should all be the same
            tm.assert_series_equal(with_format, no_infer)
            tm.assert_series_equal(no_infer, yes_infer)
    def test_to_datetime_infer_datetime_format_inconsistent_format(self):
        s = pd.Series(np.array(['01/01/2011 00:00:00',
                                '01-02-2011 00:00:00',
                                '2011-01-03T00:00:00']))
        # When the format is inconsistent, infer_datetime_format should just
        # fallback to the default parsing
        tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
                               pd.to_datetime(s, infer_datetime_format=True))
        s = pd.Series(np.array(['Jan/01/2011', 'Feb/01/2011', 'Mar/01/2011']))
        tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
                               pd.to_datetime(s, infer_datetime_format=True))
    def test_to_datetime_infer_datetime_format_series_with_nans(self):
        """NaNs interleaved with parseable strings don't break inference."""
        s = pd.Series(np.array(['01/01/2011 00:00:00', np.nan,
                                '01/03/2011 00:00:00', np.nan]))
        tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
                               pd.to_datetime(s, infer_datetime_format=True))
    def test_to_datetime_infer_datetime_format_series_starting_with_nans(self):
        """Leading NaNs: inference must look past them to find a sample."""
        s = pd.Series(np.array([np.nan, np.nan, '01/01/2011 00:00:00',
                                '01/02/2011 00:00:00', '01/03/2011 00:00:00']))
        tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
                               pd.to_datetime(s, infer_datetime_format=True))
    def test_to_datetime_iso8601_noleading_0s(self):
        # GH 11871: ISO dates without zero padding parse both with and
        # without an explicit format
        s = pd.Series(['2014-1-1', '2014-2-2', '2015-3-3'])
        expected = pd.Series([pd.Timestamp('2014-01-01'),
                              pd.Timestamp('2014-02-02'),
                              pd.Timestamp('2015-03-03')])
        tm.assert_series_equal(pd.to_datetime(s), expected)
        tm.assert_series_equal(pd.to_datetime(s, format='%Y-%m-%d'), expected)
class TestDaysInMonth(object):
    # tests for issue #10154: day-of-month bounds checking in to_datetime
    _impossible_days = ('2015-02-29', '2015-02-32', '2015-04-31')
    def test_day_not_in_month_coerce(self):
        """errors='coerce' turns impossible calendar days into NaT."""
        assert isnull(to_datetime('2015-02-29', errors='coerce'))
        for bad in self._impossible_days:
            assert isnull(to_datetime(bad, format="%Y-%m-%d",
                                      errors='coerce'))
    def test_day_not_in_month_raise(self):
        """errors='raise' rejects impossible calendar days."""
        pytest.raises(ValueError, to_datetime, '2015-02-29',
                      errors='raise')
        for bad in self._impossible_days:
            pytest.raises(ValueError, to_datetime, bad,
                          errors='raise', format="%Y-%m-%d")
    def test_day_not_in_month_ignore(self):
        """errors='ignore' passes the original strings through unchanged."""
        assert to_datetime('2015-02-29', errors='ignore') == '2015-02-29'
        for bad in self._impossible_days:
            assert to_datetime(bad, errors='ignore',
                               format="%Y-%m-%d") == bad
class TestDatetimeParsingWrappers(object):
def test_does_not_convert_mixed_integer(self):
bad_date_strings = ('-50000', '999', '123.1234', 'm', 'T')
for bad_date_string in bad_date_strings:
assert not tslib._does_string_look_like_datetime(bad_date_string)
good_date_strings = ('2012-01-01',
'01/01/2012',
'Mon Sep 16, 2013',
'01012012',
'0101',
'1-1', )
for good_date_string in good_date_strings:
assert tslib._does_string_look_like_datetime(good_date_string)
def test_parsers(self):
# https://github.com/dateutil/dateutil/issues/217
import dateutil
yearfirst = dateutil.__version__ >= LooseVersion('2.5.0')
cases = {'2011-01-01': datetime(2011, 1, 1),
'2Q2005': datetime(2005, 4, 1),
'2Q05': datetime(2005, 4, 1),
'2005Q1': datetime(2005, 1, 1),
'05Q1': datetime(2005, 1, 1),
'2011Q3': datetime(2011, 7, 1),
'11Q3': datetime(2011, 7, 1),
'3Q2011': datetime(2011, 7, 1),
'3Q11': datetime(2011, 7, 1),
# quarterly without space
'2000Q4': datetime(2000, 10, 1),
'00Q4': datetime(2000, 10, 1),
'4Q2000': datetime(2000, 10, 1),
'4Q00': datetime(2000, 10, 1),
'2000q4': datetime(2000, 10, 1),
'2000-Q4': datetime(2000, 10, 1),
'00-Q4': datetime(2000, 10, 1),
'4Q-2000': datetime(2000, 10, 1),
'4Q-00': datetime(2000, 10, 1),
'00q4': datetime(2000, 10, 1),
'2005': datetime(2005, 1, 1),
'2005-11': datetime(2005, 11, 1),
'2005 11': datetime(2005, 11, 1),
'11-2005': datetime(2005, 11, 1),
'11 2005': datetime(2005, 11, 1),
'200511': datetime(2020, 5, 11),
'20051109': datetime(2005, 11, 9),
'20051109 10:15': datetime(2005, 11, 9, 10, 15),
'20051109 08H': datetime(2005, 11, 9, 8, 0),
'2005-11-09 10:15': datetime(2005, 11, 9, 10, 15),
'2005-11-09 08H': datetime(2005, 11, 9, 8, 0),
'2005/11/09 10:15': datetime(2005, 11, 9, 10, 15),
'2005/11/09 08H': datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime(2003, 9, 25, 10,
36, 28),
"Thu Sep 25 2003": datetime(2003, 9, 25),
"Sep 25 2003": datetime(2003, 9, 25),
"January 1 2014": datetime(2014, 1, 1),
# GH 10537
'2014-06': datetime(2014, 6, 1),
'06-2014': datetime(2014, 6, 1),
'2014-6': datetime(2014, 6, 1),
'6-2014': datetime(2014, 6, 1),
'20010101 12': datetime(2001, 1, 1, 12),
'20010101 1234': datetime(2001, 1, 1, 12, 34),
'20010101 123456': datetime(2001, 1, 1, 12, 34, 56),
}
for date_str, expected in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str,
yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(np.array([date_str], dtype=object),
yearfirst=yearfirst)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
assert res == expected
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([pd.Timestamp(expected)])
tm.assert_index_equal(res, exp)
# these really need to have yearfist, but we don't support
if not yearfirst:
result5 = Timestamp(date_str)
assert result5 == expected
result7 = date_range(date_str, freq='S', periods=1,
yearfirst=yearfirst)
assert result7 == expected
# NaT
result1, _, _ = tools.parse_time_string('NaT')
result2 = to_datetime('NaT')
result3 = Timestamp('NaT')
result4 = DatetimeIndex(['NaT'])[0]
assert result1 is tslib.NaT
assert result1 is tslib.NaT
assert result1 is tslib.NaT
assert result1 is tslib.NaT
def test_parsers_quarter_invalid(self):
cases = ['2Q 2005', '2Q-200A', '2Q-200', '22Q2005', '6Q-20', '2Q200.']
for case in cases:
pytest.raises(ValueError, tools.parse_time_string, case)
    def test_parsers_dayfirst_yearfirst(self):
        """dayfirst/yearfirst combinations match dateutil's interpretation.

        The table below records how each dateutil release resolved the two
        ambiguous strings; combinations that changed across versions are
        skipped on old dateutil.
        """
        tm._skip_if_no_dateutil()
        # OK
        # 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
        # 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
        # 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
        # OK
        # 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
        # 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
        # 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
        # bug fix in 2.5.2
        # 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
        # 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
        # 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
        # OK
        # 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
        # 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
        # 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
        # OK
        # 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
        # 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
        # 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
        # OK
        # 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
        # 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
        # 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
        # revert of bug in 2.5.2
        # 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
        # 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
        # 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
        # OK
        # 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
        # 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
        # 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
        import dateutil
        is_lt_253 = dateutil.__version__ < LooseVersion('2.5.3')
        # str : dayfirst, yearfirst, expected
        cases = {'10-11-12': [(False, False,
                               datetime(2012, 10, 11)),
                              (True, False,
                               datetime(2012, 11, 10)),
                              (False, True,
                               datetime(2010, 11, 12)),
                              (True, True,
                               datetime(2010, 12, 11))],
                 '20/12/21': [(False, False,
                               datetime(2021, 12, 20)),
                              (True, False,
                               datetime(2021, 12, 20)),
                              (False, True,
                               datetime(2020, 12, 21)),
                              (True, True,
                               datetime(2020, 12, 21))]}
        from dateutil.parser import parse
        for date_str, values in compat.iteritems(cases):
            for dayfirst, yearfirst, expected in values:
                # odd comparisons across version
                # let's just skip
                if dayfirst and yearfirst and is_lt_253:
                    continue
                # compare with dateutil result
                dateutil_result = parse(date_str, dayfirst=dayfirst,
                                        yearfirst=yearfirst)
                assert dateutil_result == expected
                result1, _, _ = tools.parse_time_string(date_str,
                                                        dayfirst=dayfirst,
                                                        yearfirst=yearfirst)
                # we don't support dayfirst/yearfirst here:
                if not dayfirst and not yearfirst:
                    result2 = Timestamp(date_str)
                    assert result2 == expected
                result3 = to_datetime(date_str, dayfirst=dayfirst,
                                      yearfirst=yearfirst)
                result4 = DatetimeIndex([date_str], dayfirst=dayfirst,
                                        yearfirst=yearfirst)[0]
                assert result1 == expected
                assert result3 == expected
                assert result4 == expected
def test_parsers_timestring(self):
tm._skip_if_no_dateutil()
from dateutil.parser import parse
# must be the same as dateutil result
cases = {'10:15': (parse('10:15'), datetime(1, 1, 1, 10, 15)),
'9:05': (parse('9:05'), datetime(1, 1, 1, 9, 5))}
for date_str, (exp_now, exp_def) in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
# parse time string return time string based on default date
# others are not, and can't be changed because it is used in
# time series plot
assert result1 == exp_def
assert result2 == exp_now
assert result3 == exp_now
assert result4 == exp_now
assert result5 == exp_now
    def test_parsers_time(self):
        """tools.to_time parses many time spellings and honors errors modes."""
        # GH11818
        # module-level helper (not tm.) -- skips under non-default locales
        _skip_if_has_locale()
        strings = ["14:15", "1415", "2:15pm", "0215pm", "14:15:00", "141500",
                   "2:15:00pm", "021500pm", time(14, 15)]
        expected = time(14, 15)
        for time_string in strings:
            assert tools.to_time(time_string) == expected
        # unknown separator: raises unless the format is given explicitly
        new_string = "14.15"
        pytest.raises(ValueError, tools.to_time, new_string)
        assert tools.to_time(new_string, format="%H.%M") == expected
        arg = ["14:15", "20:20"]
        expected_arr = [time(14, 15), time(20, 20)]
        assert tools.to_time(arg) == expected_arr
        assert tools.to_time(arg, format="%H:%M") == expected_arr
        assert tools.to_time(arg, infer_time_format=True) == expected_arr
        # wrong format: coerce -> None per element, ignore -> original array,
        # raise -> ValueError
        assert tools.to_time(arg, format="%I:%M%p",
                             errors="coerce") == [None, None]
        res = tools.to_time(arg, format="%I:%M%p", errors="ignore")
        tm.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_))
        with pytest.raises(ValueError):
            tools.to_time(arg, format="%I:%M%p", errors="raise")
        tm.assert_series_equal(tools.to_time(Series(arg, name="test")),
                               Series(expected_arr, name="test"))
        # ndarray input comes back as a plain list
        res = tools.to_time(np.array(arg))
        assert isinstance(res, list)
        assert res == expected_arr
def test_parsers_monthfreq(self):
cases = {'201101': datetime(2011, 1, 1, 0, 0),
'200005': datetime(2000, 5, 1, 0, 0)}
for date_str, expected in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str, freq='M')
assert result1 == expected
    def test_parsers_quarterly_with_freq(self):
        """Quarter strings respect the fiscal-year anchor from ``freq``."""
        # quarter out of range -> DateParseError with a clear message
        msg = ('Incorrect quarterly string is given, quarter '
               'must be between 1 and 4: 2013Q5')
        with tm.assert_raises_regex(tslib.DateParseError, msg):
            tools.parse_time_string('2013Q5')
        # GH 5418: an unintelligible freq also raises DateParseError
        msg = ('Unable to retrieve month information from given freq: '
               'INVLD-L-DEC-SAT')
        with tm.assert_raises_regex(tslib.DateParseError, msg):
            tools.parse_time_string('2013Q1', freq='INVLD-L-DEC-SAT')
        # A-APR shifts the fiscal year, so 2013Q2 starts in Aug 2012
        cases = {('2013Q2', None): datetime(2013, 4, 1),
                 ('2013Q2', 'A-APR'): datetime(2012, 8, 1),
                 ('2013-Q2', 'A-DEC'): datetime(2013, 4, 1)}
        for (date_str, freq), exp in compat.iteritems(cases):
            result, _, _ = tools.parse_time_string(date_str, freq=freq)
            assert result == exp
    def test_parsers_timezone_minute_offsets_roundtrip(self):
        """Offsets with a minute component (+0545, +0530) round-trip to tz names."""
        # GH11708
        base = to_datetime("2013-01-01 00:00:00")
        # (input string, tz to convert to, expected repr after conversion)
        dt_strings = [
            ('2013-01-01 05:45+0545',
             "Asia/Katmandu",
             "Timestamp('2013-01-01 05:45:00+0545', tz='Asia/Katmandu')"),
            ('2013-01-01 05:30+0530',
             "Asia/Kolkata",
             "Timestamp('2013-01-01 05:30:00+0530', tz='Asia/Kolkata')")
        ]
        for dt_string, tz, dt_string_repr in dt_strings:
            dt_time = to_datetime(dt_string)
            # offset-aware parse equals the same instant in UTC
            assert base == dt_time
            converted_time = dt_time.tz_localize('UTC').tz_convert(tz)
            assert dt_string_repr == repr(converted_time)
    def test_parsers_iso8601(self):
        # GH 12060
        # test only the iso parser - flexibility to different
        # separators and leading 0s
        # Timestamp construction falls back to dateutil
        cases = {'2011-01-02': datetime(2011, 1, 2),
                 '2011-1-2': datetime(2011, 1, 2),
                 '2011-01': datetime(2011, 1, 1),
                 '2011-1': datetime(2011, 1, 1),
                 '2011 01 02': datetime(2011, 1, 2),
                 '2011.01.02': datetime(2011, 1, 2),
                 '2011/01/02': datetime(2011, 1, 2),
                 '2011\\01\\02': datetime(2011, 1, 2),
                 '2013-01-01 05:30:00': datetime(2013, 1, 1, 5, 30),
                 '2013-1-1 5:30:00': datetime(2013, 1, 1, 5, 30)}
        for date_str, exp in compat.iteritems(cases):
            actual = tslib._test_parse_iso8601(date_str)
            assert actual == exp
        # separators must all match - YYYYMM not valid
        invalid_cases = ['2011-01/02', '2011^11^11',
                         '201401', '201111', '200101',
                         # mixed separated and unseparated
                         '2005-0101', '200501-01',
                         '20010101 12:3456', '20010101 1234:56',
                         # HHMMSS must have two digits in each component
                         # if unseparated
                         '20010101 1', '20010101 123', '20010101 12345',
                         '20010101 12345Z',
                         # wrong separator for HHMMSS
                         '2001-01-01 12-34-56']
        for date_str in invalid_cases:
            with pytest.raises(ValueError):
                tslib._test_parse_iso8601(date_str)
                # If no ValueError raised, let me know which case failed.
                # (only reached when the parse call above did NOT raise;
                # the Exception then fails the pytest.raises block with
                # the offending string in its message)
                raise Exception(date_str)
class TestArrayToDatetime(object):
def test_try_parse_dates(self):
from dateutil.parser import parse
arr = np.array(['5/1/2000', '6/1/2000', '7/1/2000'], dtype=object)
result = lib.try_parse_dates(arr, dayfirst=True)
expected = [parse(d, dayfirst=True) for d in arr]
assert np.array_equal(result, expected)
    def test_parsing_valid_dates(self):
        """Numeric and month-name date strings convert to M8[ns] arrays."""
        arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr),
            np_array_datetime64_compat(
                [
                    '2013-01-01T00:00:00.000000000-0000',
                    '2013-01-02T00:00:00.000000000-0000'
                ],
                dtype='M8[ns]'
            )
        )
        # month-name style also parses
        arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object)
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr),
            np_array_datetime64_compat(
                [
                    '2013-09-16T00:00:00.000000000-0000',
                    '2013-09-17T00:00:00.000000000-0000'
                ],
                dtype='M8[ns]'
            )
        )
    def test_parsing_timezone_offsets(self):
        # All of these datetime strings with offsets are equivalent
        # to the same datetime after the timezone offset is added
        dt_strings = [
            '01-01-2013 08:00:00+08:00',
            '2013-01-01T08:00:00.000000000+0800',
            '2012-12-31T16:00:00.000000000-0800',
            '12-31-2012 23:00:00-01:00'
        ]
        # the offset-free equivalent instant (UTC midnight 2013-01-01)
        expected_output = tslib.array_to_datetime(np.array(
            ['01-01-2013 00:00:00'], dtype=object))
        for dt_string in dt_strings:
            tm.assert_numpy_array_equal(
                tslib.array_to_datetime(
                    np.array([dt_string], dtype=object)
                ),
                expected_output
            )
def test_number_looking_strings_not_into_datetime(self):
# #4601
# These strings don't look like datetimes so they shouldn't be
# attempted to be converted
arr = np.array(['-352.737091', '183.575577'], dtype=object)
tm.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
arr = np.array(['1', '2', '3', '4', '5'], dtype=object)
tm.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
    def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
        """Dates before the M8[ns] range raise on 'raise' and NaT on 'coerce'."""
        # year 1000 in every input flavor is below the ~1677 ns-epoch floor
        invalid_dates = [
            date(1000, 1, 1),
            datetime(1000, 1, 1),
            '1000-01-01',
            'Jan 1, 1000',
            np.datetime64('1000-01-01'),
        ]
        for invalid_date in invalid_dates:
            pytest.raises(ValueError,
                          tslib.array_to_datetime,
                          np.array([invalid_date], dtype='object'),
                          errors='raise', )
            tm.assert_numpy_array_equal(
                tslib.array_to_datetime(
                    np.array([invalid_date], dtype='object'),
                    errors='coerce'),
                np.array([tslib.iNaT], dtype='M8[ns]')
            )
        # a mixed array: only the out-of-bounds element becomes NaT
        arr = np.array(['1/1/1000', '1/1/2000'], dtype=object)
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr, errors='coerce'),
            np_array_datetime64_compat(
                [
                    tslib.iNaT,
                    '2000-01-01T00:00:00.000000000-0000'
                ],
                dtype='M8[ns]'
            )
        )
    def test_coerce_of_invalid_datetimes(self):
        arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object)
        # Without coercing, the presence of any invalid dates prevents
        # any values from being converted
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr, errors='ignore'), arr)
        # With coercing, each invalid date becomes iNaT while valid
        # elements still convert
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr, errors='coerce'),
            np_array_datetime64_compat(
                [
                    '2013-01-01T00:00:00.000000000-0000',
                    tslib.iNaT,
                    tslib.iNaT
                ],
                dtype='M8[ns]'
            )
        )
def test_normalize_date():
    """normalize_date drops any time-of-day component."""
    expected = datetime(2012, 9, 7)
    for value in (date(2012, 9, 7), datetime(2012, 9, 7, 12)):
        assert normalize_date(value) == expected
@pytest.fixture(params=['D', 's', 'ms', 'us', 'ns'])
def units(request):
    """Resolution strings accepted by to_datetime's ``unit`` argument."""
    return request.param
@pytest.fixture
def epoch_1960():
    # for origin as 1960-01-01
    return Timestamp('1960-01-01')
@pytest.fixture
def units_from_epochs():
    """Small integer offsets applied on top of the chosen epoch."""
    return list(range(5))
# NOTE(review): the params call epoch_1960() as a plain function at
# collection time; newer pytest versions forbid calling fixtures directly.
@pytest.fixture(params=[epoch_1960(),
                        epoch_1960().to_pydatetime(),
                        epoch_1960().to_datetime64(),
                        str(epoch_1960())])
def epochs(request):
    """The 1960 epoch expressed as Timestamp, datetime, datetime64 and str."""
    return request.param
@pytest.fixture
def julian_dates():
    """Ten consecutive dates from 2014-01-01 as Julian day numbers."""
    return pd.date_range('2014-1-1', periods=10).to_julian_date().values
class TestOrigin(object):
    """Tests for the ``origin`` argument of pd.to_datetime."""
    def test_to_basic(self, julian_dates):
        # gh-11276, gh-11745
        # for origin as julian
        result = Series(pd.to_datetime(
            julian_dates, unit='D', origin='julian'))
        # julian origin == unix epoch shifted by the epoch's julian date
        expected = Series(pd.to_datetime(
            julian_dates - pd.Timestamp(0).to_julian_date(), unit='D'))
        assert_series_equal(result, expected)
        result = Series(pd.to_datetime(
            [0, 1, 2], unit='D', origin='unix'))
        expected = Series([Timestamp('1970-01-01'),
                           Timestamp('1970-01-02'),
                           Timestamp('1970-01-03')])
        assert_series_equal(result, expected)
        # default origin is 'unix'
        result = Series(pd.to_datetime(
            [0, 1, 2], unit='D'))
        expected = Series([Timestamp('1970-01-01'),
                           Timestamp('1970-01-02'),
                           Timestamp('1970-01-03')])
        assert_series_equal(result, expected)
    def test_julian_round_trip(self):
        """Julian day number -> Timestamp -> Julian day number is lossless."""
        result = pd.to_datetime(2456658, origin='julian', unit='D')
        assert result.to_julian_date() == 2456658
        # out-of-bounds
        with pytest.raises(ValueError):
            pd.to_datetime(1, origin="julian", unit='D')
    def test_invalid_unit(self, units, julian_dates):
        # checking for invalid combination of origin='julian' and unit != D
        if units != 'D':
            with pytest.raises(ValueError):
                pd.to_datetime(julian_dates, unit=units, origin='julian')
    def test_invalid_origin(self):
        # need to have a numeric specified
        with pytest.raises(ValueError):
            pd.to_datetime("2005-01-01", origin="1960-01-01")
        with pytest.raises(ValueError):
            pd.to_datetime("2005-01-01", origin="1960-01-01", unit='D')
    def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):
        """Numeric values are offsets (in ``units``) from the given origin."""
        expected = Series(
            [pd.Timedelta(x, unit=units) +
             epoch_1960 for x in units_from_epochs])
        result = Series(pd.to_datetime(
            units_from_epochs, unit=units, origin=epochs))
        assert_series_equal(result, expected)
    @pytest.mark.parametrize("origin, exc",
                             [('random_string', ValueError),
                              ('epoch', ValueError),
                              ('13-24-1990', ValueError),
                              (datetime(1, 1, 1), tslib.OutOfBoundsDatetime)])
    def test_invalid_origins(self, origin, exc, units, units_from_epochs):
        """Unparseable or out-of-bounds origins raise the documented errors."""
        with pytest.raises(exc):
            pd.to_datetime(units_from_epochs, unit=units,
                           origin=origin)
    def test_processing_order(self):
        # make sure we handle out-of-bounds *before*
        # constructing the dates
        result = pd.to_datetime(200 * 365, unit='D')
        expected = Timestamp('2169-11-13 00:00:00')
        assert result == expected
        result = pd.to_datetime(200 * 365, unit='D', origin='1870-01-01')
        expected = Timestamp('2069-11-13 00:00:00')
        assert result == expected
        result = pd.to_datetime(300 * 365, unit='D', origin='1870-01-01')
        expected = Timestamp('2169-10-20 00:00:00')
        assert result == expected
| mit |
deepesch/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
    # Incremental PCA on dense arrays.
    X = iris.data
    batch_size = X.shape[0] // 3
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    pca = PCA(n_components=2)
    pca.fit_transform(X)
    X_transformed = ipca.fit_transform(X)
    np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
    # explained variance should roughly agree with batch PCA (1 decimal)
    assert_almost_equal(ipca.explained_variance_ratio_.sum(),
                        pca.explained_variance_ratio_.sum(), 1)
    for n_components in [1, 2, X.shape[1]]:
        ipca = IncrementalPCA(n_components, batch_size=batch_size)
        ipca.fit(X)
        cov = ipca.get_covariance()
        precision = ipca.get_precision()
        # precision must be the matrix inverse of the covariance
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
    # Test that the projection of data is correct.
    rng = np.random.RandomState(1999)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    # offset the first 10 samples so the data has a dominant direction
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    # Get the reconstruction of the generated data X
    # Note that Xt has the same "components" as X, just separated
    # This is what we want to ensure is recreated correctly
    Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
    # Normalize
    Yt /= np.sqrt((Yt ** 2).sum())
    # Make sure that the first element of Yt is ~1, this means
    # the reconstruction worked as expected
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
    # Test that the projection of data can be inverted.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
    Y = ipca.transform(X)
    Y_inverse = ipca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
    """IncrementalPCA rejects n_components outside [1, n_features]."""
    X = [[0, 1], [1, 0]]
    for bad_n_components in (-1, 0, .99, 3):
        ipca = IncrementalPCA(bad_n_components, batch_size=10)
        assert_raises(ValueError, ipca.fit, X)
def test_incremental_pca_set_params():
    # Test that partial_fit rejects changes to n_components after fitting,
    # and accepts data again once the original setting is restored.
    # (The previous comment about "sign stability over batch sizes" was a
    # copy/paste from another test.)
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 20
    X = rng.randn(n_samples, n_features)
    X2 = rng.randn(n_samples, n_features)
    X3 = rng.randn(n_samples, n_features)
    ipca = IncrementalPCA(n_components=20)
    ipca.fit(X)
    # Decreasing number of components
    ipca.set_params(n_components=10)
    assert_raises(ValueError, ipca.partial_fit, X2)
    # Increasing number of components
    ipca.set_params(n_components=15)
    assert_raises(ValueError, ipca.partial_fit, X3)
    # Returning to original setting
    ipca.set_params(n_components=20)
    ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # partial_fit must reject data whose feature count differs from fit.
    # (The earlier comment said "changing n_components", but it is the
    # number of features that changes here.)
    rng = np.random.RandomState(1999)
    n_samples = 100
    first = rng.randn(n_samples, 20)
    wider = rng.randn(n_samples, 50)
    ipca = IncrementalPCA(n_components=None)
    ipca.fit(first)
    assert_raises(ValueError, ipca.partial_fit, wider)
def test_incremental_pca_batch_signs():
    # Test that components_ sign is stable over batch sizes.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(10, 20)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)
    # compare each fit's component signs with the next batch size's
    for i, j in zip(all_components[:-1], all_components[1:]):
        assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
    # Test that components_ values are stable over batch sizes.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(20, 40, 3)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)
    # loose tolerance: values are only approximately batch-size independent
    for i, j in zip(all_components[:-1], all_components[1:]):
        assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
    # Test that fit and partial_fit get equivalent results.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    batch_size = 10
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
    pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    # Add one to make sure endpoint is included
    batch_itr = np.arange(0, n + 1, batch_size)
    # feed the same data manually, one batch at a time
    for i, j in zip(batch_itr[:-1], batch_itr[1:]):
        pipca.partial_fit(X[i:j, :])
    assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
    """On iris, IncrementalPCA matches PCA up to component sign flips."""
    data = iris.data
    full_batch = PCA(n_components=2).fit_transform(data)
    incremental = IncrementalPCA(n_components=2,
                                 batch_size=25).fit_transform(data)
    assert_almost_equal(np.abs(full_batch), np.abs(incremental), 1)
def test_incremental_pca_against_pca_random_data():
    # Test that IncrementalPCA and PCA are approximate (to a sign flip).
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    # random data with a random per-feature offset
    X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
    Y_pca = PCA(n_components=3).fit_transform(X)
    Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
    assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
    # Test that PCA and IncrementalPCA calculations match
    X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
                                      effective_rank=10, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    # both the default and a truncated number of components
    for nc in [None, 99]:
        pca = PCA(n_components=nc).fit(X)
        ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
        assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
                            decimal=prec)
        assert_almost_equal(pca.explained_variance_ratio_,
                            ipca.explained_variance_ratio_, decimal=prec)
        assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
                            decimal=prec)
def test_whitening():
    # Test that PCA and IncrementalPCA transforms match to sign flip.
    X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
                                      effective_rank=2, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 9]:
        pca = PCA(whiten=True, n_components=nc).fit(X)
        ipca = IncrementalPCA(whiten=True, n_components=nc,
                              batch_size=250).fit(X)
        # transforms agree up to sign; inverse transforms must agree exactly
        Xt_pca = pca.transform(X)
        Xt_ipca = ipca.transform(X)
        assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
        Xinv_ipca = ipca.inverse_transform(Xt_ipca)
        Xinv_pca = pca.inverse_transform(Xt_pca)
        assert_almost_equal(X, Xinv_ipca, decimal=prec)
        assert_almost_equal(X, Xinv_pca, decimal=prec)
        assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
lucidjuvenal/quis-custodiet | twitter_feed/twittest.py | 1 | 1922 | import twitter # python-twitter package
from matplotlib.pyplot import pause
import re
############################################
# secret data kept in separate file
# Expected format per line: "<key> = <value>"; only 3-token lines are kept.
with open('twitdat.txt') as f:
    fromFile = {}
    for line in f:
        line = line.split() # to skip blank lines
        if len(line)==3 : #
            fromFile[line[0]] = line[2]
    f.close()
# NOTE(review): f.close() above is redundant inside the 'with' block.
#print fromFile
# Authenticate against the Twitter API with the credentials read above.
api = twitter.Api(
    consumer_key = fromFile['consumer_key'],
    consumer_secret = fromFile['consumer_secret'],
    access_token_key = fromFile['access_token_key'],
    access_token_secret = fromFile['access_token_secret']
)
# https://twitter.com/gov/status/743263851366449152
tweetID = 743263851366449152
# https://twitter.com/BBCMOTD/status/744216695976255492
tweetID = 744216695976255492
# https://twitter.com/BBCMOTD/status/744281250924474368
# Only the last assignment takes effect; earlier IDs kept for reference.
tweetID = 744281250924474368
try:
    tweet = api.GetStatus(status_id = tweetID)
except ConnectionError :
    # NOTE(review): ConnectionError is not a builtin in Python 2 -- this
    # handler would itself raise NameError if the request fails; confirm.
    print "should have a backup here"
candidates = ['goodguy', 'evilguy']
tags = ['precinct','ballotbox']
tags.extend(candidates)
# Deduplicate the tag list for the membership tests in getVotes().
tags = set(tags)
def getVotes(tweet,tags):
    '''
    Extract vote counts from a tweet.

    tweet is the Status object from the python-twitter api (only its
    .text attribute is read).
    tags is a set of strings (candidate/precinct names) to look for.

    Returns a dict mapping each tag found to the first integer on the
    same line. Entries are split on commas, semicolons and newlines;
    lines containing a '#' (hashtags) are ignored entirely.

    Fix: a line that mentions a tag but carries no number is now
    skipped instead of raising AttributeError (re.search returned
    None in the original).
    '''
    data = {}
    # Tweets separate entries by comma, semicolon or newline.
    lines = re.split('[,;\n]', tweet.text.lower())
    for line in lines:
        if '#' in line: # Ignore hashtag lines
            continue
        for tag in tags:
            if tag in line:
                m = re.search(r'\d+', line)
                if m is not None: # tolerate tag lines with no count
                    data[tag] = int(m.group())
    return data
def testMsgs(tweet, msgs):
for msg in msgs:
tweet.text = msg
def subTweet(tweet,msgID=0):
    """Overwrite tweet.text with one of several canned test messages.

    msgID selects the message; index 0 installs an empty list (the
    original placeholder). Returns the mutated tweet for chaining.
    """
    canned = [
        [],
        "Goodguy 57 votes!\nEvilguy 100\n#Hashtest",
        "57 Goodguy\n100 Evilguy\n#Hashtest",
        "goodguy 57 evilguy 100",
    ]
    tweet.text = canned[msgID]
    return tweet
# Exercise the parser on canned message #3 (Python 2 print statement).
tweet = subTweet(tweet, 3)
print getVotes(tweet, tags)
| gpl-3.0 |
rustychris/stompy | stompy/plot/plot_utils.py | 1 | 45515 | from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import zip
from builtins import range
from builtins import object
import time
from matplotlib.collections import LineCollection
from matplotlib.transforms import Transform,Affine2D
import matplotlib.transforms as transforms
from matplotlib import collections, path
import matplotlib.patheffects as pe
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.tri import Triangulation
from matplotlib import ticker
import numpy as np
from .. import utils
from six import string_types
try:
import xarray as xr
except ImportError:
xr="XARRAY NOT AVAILABLE"
def pick_points(n):
    """
    convenience function for getting coordinates from the plot:
    this is not much better than plt.ginput(). you should
    probably just use that.

    Collects up to n left-clicks from the current figure; coordinates
    accumulate in pick_points.results, an (n,2) float array.
    """
    count = [0]
    # np.float64: the bare name 'float64' was a NameError here.
    pick_points.results = np.zeros( (n,2), np.float64)
    fig = plt.gcf()
    cid = None
    def click_handler(event):
        print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
            event.button, event.x, event.y, event.xdata, event.ydata))
        if event.xdata:
            pick_points.results[count[0]] = [event.xdata, event.ydata]
            count[0] += 1
            if count[0] >= n:
                # got all n points -- stop listening
                fig.canvas.mpl_disconnect(cid)
    cid = fig.canvas.mpl_connect('button_press_event', click_handler)
# A rehash of pick_points:
def ax_picker(ax):
    """Attach a rectangle-picking handler to ax.

    Left press+release records a [x0,x1,y0,y1] row into ax.picked;
    right-click prints the accumulated picks and clears them.
    Replaces any handlers installed by a previous call on this axes.
    """
    fig = ax.figure
    # tear down handlers from a previous ax_picker() on this axes
    if hasattr(ax,'pick_cids'):
        for cid in ax.pick_cids:
            fig.canvas.mpl_disconnect(cid)
    def init_picked():
        # np.zeros/np.float64: the bare names were NameErrors here.
        ax.picked = np.zeros( (0,4), np.float64)
        ax.pick_start = None
    init_picked()
    def on_press(event):
        # ignore clicks while pan/zoom tools are active
        if fig.canvas.toolbar.mode != '':
            return
        if event.button==1 and event.xdata:
            ax.pick_start = [event.xdata,event.ydata]
        elif event.button==3:
            print(ax.picked)
            init_picked()
    def on_release(event):
        if fig.canvas.toolbar.mode != '':
            return
        if event.xdata and ax.pick_start is not None:
            new_pnt = np.array([ax.pick_start[0],event.xdata,ax.pick_start[1],event.ydata])
            ax.picked=utils.array_append( ax.picked,new_pnt )
    cid_p = fig.canvas.mpl_connect('button_press_event', on_press)
    cid_r = fig.canvas.mpl_connect('button_release_event', on_release)
    ax.pick_cids = [cid_p,cid_r]
def draw_polyline(ax=None,remove=True):
    """
    rough and ready interface to capture a polyline in a plot window.
    left clicks add a point, right click ends. returns the points in
    a numpy array.
    """
    ax=ax or plt.gca()
    fig=ax.get_figure()
    collecting=[1]  # mutable flag so the closure can signal completion
    pick_points=[]
    line=ax.plot([],[],'r-o')[0]  # live preview of the polyline
    def click_handler(event):
        # ignore clicks while pan/zoom tools are active
        if fig.canvas.toolbar.mode != '':
            return
        print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
            event.button, event.x, event.y, event.xdata, event.ydata))
        if event.button==1 and event.xdata:
            pick_points.append( [event.xdata,event.ydata] )
            x=[p[0] for p in pick_points]
            y=[p[1] for p in pick_points]
            line.set_xdata(x)
            line.set_ydata(y)
        elif event.button==3:
            print("Done collecting points")
            collecting[0]=0
    cid = fig.canvas.mpl_connect('button_press_event', click_handler)
    # busy-wait, letting the GUI event loop run, until right-click
    while collecting[0]:
        plt.pause(0.01)
    fig.canvas.mpl_disconnect(cid)
    if remove:
        ax.lines.remove(line)
        plt.draw()
    return np.array(pick_points)
def plotyy( x1, y1, x2, y2, color1='b', color2='g', fun=None, **kwargs ):
    """
    A work-alike of the Matlab (TM) function of the same name.  This
    places two curves on the same axes using the same x-axis, but
    different y-axes.

    Call signature::

    ax, h1, h2 = plotyy( x1, y1, x2, y2, color1='b', color2='g',
                         fun=None, **kwargs )

    color1 and color2 are the colors to make respective curves and y-axes.

    fun is the function object to use for plotting.  Must accept calls
    of the form fun(x,y,color='color',**kwargs).  Typically, something
    like plot, semilogy, semilogx or loglog.  If *None*, defaults to
    pyplot.plot.

    **kwargs is any list of keyword arguments accepted by fun.

    ax is a 2 element list with the handles for the first and second
    axes.  h1 is the handle to the first curve, h2 to the second
    curve.

    NOTE that this function won't scale two curves so that y-ticks are
    in the same location as the Matlab (TM) version does.
    """
    # BUG FIX: default was the undefined bare name 'plot' (NameError).
    if fun is None: fun = plt.plot
    ax1 = plt.gca()
    ax1.clear()
    # Get axes location
    try:
        rect = ax1.get_position().bounds
    except AttributeError:
        rect = np.array( ax1.get_position() )
        rect[2:] += rect[:2]
    # Add first curve
    h1 = fun( x1, y1, color=color1, **kwargs )
    # Add second axes on top of first with joined x-axis
    ax2 = plt.twinx(ax1)
    # Plot second curve initially
    h2 = fun( x2, y2, color=color2, **kwargs )
    # Set axis properties
    plt.setp( ax2.get_xticklabels(), visible=False)
    # Change colors appropriately
    def recolor( obj, col ):
        # best-effort recursive recoloring of an artist tree
        try: obj.set_color( col )
        except: pass
        try: obj.set_facecolor( col )
        except: pass
        try: obj.set_edgecolor( col )
        except: pass
        try:
            ch = obj.get_children()
            for c in ch:
                recolor( c, col )
        except: pass
    recolor( ax1.yaxis, color1 )
    recolor( ax2.yaxis, color2 )
    plt.draw_if_interactive()
    return ( [ax1,ax2], h1, h2 )
# remove parts of the plot that extend beyond the x limits of the
# axis - assumes that the x-data for each line is non-decreasing
def trim_xaxis(ax=None):
    """Drop data outside the current x limits from every line on ax.

    Assumes each line's x data is non-decreasing; keeps one point
    beyond each limit so lines still reach the axes edges.
    """
    ax = ax or plt.gca()
    x_lo, x_hi = ax.axis()[:2]
    for line in ax.lines:
        xs = line.get_xdata()
        ys = line.get_ydata()
        lo = max(np.searchsorted(xs, x_lo) - 1, 0)
        hi = np.searchsorted(xs, x_hi) + 1
        line.set_xdata(xs[lo:hi])
        line.set_ydata(ys[lo:hi])
def plot_tri(tri,**kwargs):
    # DEPRECATED: matplotlib now has triplot and friends
    """Plot the edges of a triangulation as an opaque black LineCollection."""
    # compile list of edges, then create the collection, and plot
    ex = tri.x[tri.edge_db]
    ey = tri.y[tri.edge_db]
    # np.newaxis / np.float32: the bare names were NameErrors here.
    edges = np.concatenate( (ex[:,:,np.newaxis], ey[:,:,np.newaxis]), axis=2)
    colors = np.ones( (len(edges),4), np.float32 )
    colors[:,:3] = 0   # black
    colors[:,3] = 1.0  # fully opaque
    coll = LineCollection(edges,colors=colors)
    ax = plt.gca()
    ax.add_collection(coll)
def scalebar(xy,L=None,aspect=0.05,unit_factor=1,fmt="%.0f",label_txt=None,fractions=[0,0.5,1.0],
             divisions=None,
             ax=None,xy_transform=None,dy=None,lw=0.5,
             style='altboxes'):
    """ Draw a simple scale bar with labels - bottom left
    is given by xy.
    xy_transform: transform for interpreting xy; if not given, data
    coordinates are used.
    The divisions are either inferred from the max length L and fractions of it, or
    by specifying a list of lengths in divisions.
    style: 'altboxes' (staggered half-height boxes), 'boxes'
    (alternating full-height boxes), or 'ticks'.
    Returns (list of artists, list of texts).
    """
    ax = ax or plt.gca()
    if xy_transform is None:
        txt_trans=xy_transform=ax.transData
    else:
        # Still have to pull x scaling from the data axis
        xy_transform=ScaleXOnly(xy_transform,
                                ax.transData,xoffset=xy[0])
        txt_trans=xy_transform
        xy=[0,xy[1]] # x offset now rolled into xy_transform
    if divisions is not None:
        L=divisions[-1]
    else:
        if L is None:
            xmin,xmax,ymin,ymax = ax.axis()
            L = 0.2 * (xmax - xmin)
        divisions=[f*L for f in fractions]
    xmin,ymin = xy
    dx = L
    # This is dangerous, as L is in data coordinates, but y may be in data or
    # axes coordinates depending on what xy_transform was passed in.
    dy = dy or (aspect*L)
    # xmax = xmin + L
    ymax = ymin + dy
    objs = []
    txts = []
    if style in ('boxes','altboxes'):
        # one filled black box:
        objs.append( ax.fill([xmin,xmin+dx,xmin+dx,xmin],
                             [ymin,ymin,ymax,ymax],
                             'k', edgecolor='k',transform=xy_transform,lw=lw) )
        for i in range(len(divisions)-1):
            xleft=xmin+divisions[i]
            xright=xmin+divisions[i+1]
            xlist=[xleft,xright,xright,xleft]
            if style=='altboxes':
                ybot=ymin+0.5*(i%2)*dy
                ytop=ybot+0.5*dy
                # print ybot,ytop
                objs.append( ax.fill(xlist,
                                     [ybot,ybot,ytop,ytop],
                                     'w', edgecolor='k',transform=xy_transform,lw=lw) )
            else:
                # BUG FIX: this branch tested the undefined name 'y'
                # (NameError); alternate white boxes on even divisions,
                # paralleling the altboxes staggering above.
                if i%2==0:
                    objs.append( ax.fill(xlist,
                                         [ymin,ymin,ymax,ymax],
                                         'w', edgecolor='k',lw=lw,transform=xy_transform) )
    elif style=='ticks':
        # similar to ticks on an axis
        segs=[ [ [xmin,ymin],[xmin+dx,ymin]] ]
        for i in range(len(divisions)):
            xleft=xmin+divisions[i]
            segs.append( [ [xleft,ymin],[xleft,ymax]] )
        lcoll=LineCollection(segs,color='k',lw=lw,transform=xy_transform)
        objs.append(lcoll)
        ax.add_collection(lcoll)
    baseline=ymax + 0.25*dy
    for div in divisions:
        div_txt=fmt%(unit_factor* div)
        txts.append( ax.text(xmin+div,baseline,
                             div_txt,
                             ha='center',
                             transform=txt_trans) )
    # Really would like for the label to be on the same baseline
    # as the fraction texts, and typeset along with the last
    # label, but allowing the number of the last label to be
    # centered on its mark
    if label_txt:
        txts.append( ax.text(xmin+div,baseline," "*len(div_txt) + label_txt,ha='left',
                             transform=txt_trans) )
    return objs,txts
def north_arrow(xy,L,ax=None,decl_east=0.0,transform=None,angle=0.0,width=0.1):
    """Draw a half-barbed north arrow of length L with its base at xy.

    decl_east: declination (degrees east), subtracted from angle.
    width: barb width as a fraction of L.
    Returns (fill_artists, text_artist).
    """
    ax=ax or plt.gca()
    transform=transform or ax.transData
    w=width*L
    xy=np.asarray(xy)
    pnts=np.array( [[0,0], # base of arrow
                    [0,L], # vertical stroke
                    [w,0.5*L], # outer hypotenuse
                    [0,0.55*L]] ) # barb
    tot_rot=angle-decl_east
    pnts=utils.rot(tot_rot*np.pi/180,pnts)
    pnts=pnts+xy
    # label sits just beyond the arrow tip, rotated with the arrow
    tip=xy+utils.rot(tot_rot*np.pi/180,np.array( [0,1.02*L] ))
    obj=ax.fill( pnts[:,0],pnts[:,1],'k',transform=transform)
    txt=ax.text(tip[0],tip[1]," $\mathcal{N}$",transform=transform,ha='center',rotation=tot_rot)
    return obj,txt
def show_slopes(ax=None,slopes=[-5./3,-1],xfac=5,yfac=3):
    """Draw short gray log-log reference lines of the given slopes,
    each labeled with its slope value.

    NOTE(review): this function is defined a second time later in this
    module; the later (identical) definition shadows this one at import.
    NOTE(review): plt.annotate labels the *current* axes, which may
    differ from ax -- confirm intended.
    """
    ax = ax or plt.gca()
    x = np.median( [l.get_xdata()[-1] for l in ax.lines] )
    y = np.max( [l.get_ydata()[-1] for l in ax.lines] )
    y *= yfac # set the legend above the plotted lines
    xs = np.array([x/xfac,x])
    for s in slopes:
        ys = np.array([y/xfac**s,y])
        ax.loglog(xs,ys,c='0.5')
        plt.annotate("%g"%s,[xs[0],ys[0]])
class LogLogSlopeGrid(object):
    """ draw evenly spaced lines, for now in log-log space, at a given slope.
    y=mx+b
    """
    def __init__(self,ax=None,slopes=[-5/3.],intervals=[10],xmin=None,xmax=None):
        """ Note that intervals is linear!

        slopes/intervals: parallel lists -- one family of lines per
        slope, with the given spacing between lines.
        xmin/xmax: optionally clip the lines to this x range.
        """
        self.ax = ax or plt.gca()
        self.slopes = slopes
        self.intervals = intervals
        self.colls = []  # LineCollections added by draw(); removed on redraw
        self.xlog = self.ylog = True
        self.xmin=xmin
        self.xmax=xmax
        self.draw()
    def draw(self):
        """(Re)draw the grid lines for the current axis limits."""
        # remove any previously drawn collections
        for c in self.colls:
            self.ax.collections.remove(c)
        self.colls = []
        xmin,xmax,ymin,ymax = self.ax.axis()
        # allow override
        if self.xmin is not None:
            xmin=self.xmin
        if self.xmax is not None:
            xmax=self.xmax
        # work in log space so constant-slope lines are straight
        if self.xlog:
            xmin = np.log(xmin) ; xmax = np.log(xmax)
        if self.ylog:
            ymin = np.log(ymin) ; ymax = np.log(ymax)
        for s,interval in zip(self.slopes,self.intervals):
            corners = np.array( [[xmin,ymin],
                                 [xmax,ymin],
                                 [xmax,ymax],
                                 [xmin,ymax]] )
            # intercepts through the corners bound the set of lines of
            # slope s that intersect the axes rectangle
            corner_b = corners[:,1] - s*corners[:,0]
            if self.ylog:
                interval = np.log(interval)
            all_b = np.arange(corner_b.min(),corner_b.max(),interval)
            segs = np.zeros( (len(all_b),2,2), np.float64)
            segs[:,0,0] = xmin
            segs[:,1,0] = xmax
            segs[:,0,1] = s*xmin+all_b
            segs[:,1,1] = s*xmax+all_b
            # map back to data coordinates
            if self.xlog:
                segs[...,0] = np.exp(segs[...,0])
            if self.ylog:
                segs[...,1] = np.exp(segs[...,1])
            coll = LineCollection(segs,color='0.75',zorder=-10)
            self.ax.add_collection(coll)
            self.colls.append(coll)
def enable_picker(coll,ax=None,cb=None,points=5):
    """ minimal wrapper for selecting indexes from a collection, like a
    scatter plot. cb gets the first index chosen, and a handful of kw arguments:
    ax: the axes
    coll: collection
    event: event
    dataxy: data coordinates for the selected item
    returns an object which when called always returns the most recent index chosen
    (its .cid attribute holds the mpl connection id for later disconnect)
    """
    ax = ax or coll.axes # was plt.gca(), then coll.get_axes(). modern is just .axes
    coll.set_picker(points) # should be 5 points
    class dummy(object):
        # callable holder for the most recently picked index
        idx = None
        def __call__(self):
            return self.idx
    my_dummy = dummy()
    def onpick(event):
        if event.artist == coll:
            idx = event.ind[0]
            my_dummy.idx = idx
            if cb:
                # recover the data coordinates of the picked item
                if hasattr(coll,'get_offsets'): # for scatter plots
                    dataxy=coll.get_offsets()[idx]
                elif hasattr(coll,'get_xydata'): # for lines
                    dataxy=coll.get_xydata()[idx]
                kws=dict(ax=ax,coll=coll,event=event,dataxy=dataxy)
                print("in onpick: kws:",kws)
                cb(idx,**kws)
            else:
                pass
    my_dummy.cid = ax.figure.canvas.mpl_connect('pick_event',onpick)
    return my_dummy
class tooltipper(object):
    """ similar to enable_picker, but displays a transient text box
    tips: either a callable, called as in enable_picker, which returns the string
    to display, or a list of strings, which will be indexed by idx from the
    collection.
    tips calling convention: the idx as the first argument, plus keywords:
    ax: the axes
    coll: collection
    event: event
    dataxy: data coordinates for the selected item
    persist: if False, then only one tooltip is displayed at a time. If true,
    each tip can be toggled by clicking the individual item.
    returns an object which when called always returns the most recent index chosen
    """
    # most recently picked index; None before any pick
    last_idx = None
    def __init__(self,coll,tips=None,ax=None,persist=False,points=5):
        ax = ax or coll.axes # older MPL: get_axes()
        self.ax=ax
        self.coll=coll
        coll.set_picker(points)
        self.persist=persist
        self.tips=tips
        self.texts={} # map index to Text
        self.cid = self.ax.figure.canvas.mpl_connect('pick_event',self.onpick)
    def __call__(self):
        return self.last_idx
    def onpick(self,event):
        """Toggle a tooltip for the picked item in self.coll."""
        coll=self.coll
        ax=self.ax
        if event.artist == coll:
            idx = event.ind[0]
            # BUG FIX: last_idx was never recorded, so __call__() always
            # returned None despite the documented contract.
            self.last_idx = idx
            if idx in self.texts: # toggle off
                ax.texts.remove(self.texts.pop(idx))
                ax.figure.canvas.draw()
                return
            if not self.persist: # also toggle off anybody currently shown
                for k,txt in self.texts.items():
                    ax.texts.remove(txt)
                self.texts={}
            # locate the picked item in data coordinates
            if hasattr(coll,'get_offsets'): # for scatter plots
                xy=coll.get_offsets()[idx]
            elif hasattr(coll,'get_xydata'): # for lines
                xy=coll.get_xydata()[idx]
            else:
                print("Can't figure out xy location!")
                return
            kws=dict(ax=ax,coll=coll,event=event,dataxy=xy)
            # resolve the tip text: default index, list lookup, or callable
            if self.tips is None:
                tip_str=str(idx)
            elif isinstance(self.tips,list):
                tip_str=self.tips[idx]
            else:
                tip_str=self.tips(idx,**kws)
            tt_text=ax.text(xy[0],xy[1],tip_str,
                            transform=ax.transData,
                            va='bottom',ha='left',
                            bbox=dict(facecolor=(1.0,1.0,0.5),ec='k',lw=0.5))
            self.texts[idx]=tt_text
            ax.figure.canvas.draw()
def gpick(coll,*args,**kwargs):
    """ Given a collection, wait for a pick click, and return the id
    of the picked object within the collection

    Blocks (while running the GUI event loop) until an item is picked
    or the figure window is closed; returns None on window close.
    """
    picked_id=[-1]  # -1: still waiting; None: figure closed
    def my_cb(idx,**kwarg):
        picked_id[0]=idx
    def on_close(event):
        picked_id[0]=None
    fig=coll.get_figure()
    cid=fig.canvas.mpl_connect('close_event', on_close)
    picker=enable_picker(coll,cb=my_cb,**kwargs)
    while picked_id[0]==-1:
        plt.pause(0.01)
    # clean up both event handlers before returning
    fig.canvas.mpl_disconnect(cid)
    fig.canvas.mpl_disconnect(picker.cid)
    return picked_id[0]
def sqrt_scale(mappable,cbar,fmt="%.2f"):
    """Display mappable on a sqrt color scale, while the colorbar
    labels continue to read in the original (squared-back) units."""
    mappable.set_array(np.sqrt(mappable.get_array()))
    # Tick labels undo the sqrt so the bar still reads in data units.
    def undo(val, pos):
        return fmt % (val ** 2)
    cbar.formatter = ticker.FuncFormatter(undo)
    cbar.update_ticks()
def period_labeler(freq,base_unit='h'):
    """Format a frequency (cycles per hour) as a human-readable period.

    Returns "DC" for zero frequency; otherwise days, hours or minutes
    depending on magnitude. Only base_unit='h' is supported.
    """
    assert base_unit=='h' # or fix!
    if freq == 0.0:
        return "DC"
    hours = 1. / freq
    if hours > 30:
        label = "%.2fd" % (hours / 24.)
    elif hours > 1:
        label = "%.2fh" % hours
    else:
        label = "%.2fm" % (hours * 60)
    return label
def period_scale(xaxis,base_unit='h'):
    """Relabel a frequency axis with period strings (see period_labeler)."""
    def flabel(f_per_h,pos):
        return period_labeler(f_per_h,base_unit)
    # ticker.FuncFormatter: the bare name FuncFormatter was a NameError
    # (only 'from matplotlib import ticker' is imported at module level).
    fmter=ticker.FuncFormatter(flabel)
    xaxis.set_major_formatter(fmter)
def pad_pcolormesh(X,Y,Z,ax=None,dx_single=None,dy_single=None,
                   fallback=True,**kwargs):
    """Pcolormesh with X/Y expanded from cell centers to cell edges,
    reducing the half-cell shift of a naive pcolormesh call.

    dx_single/dy_single: cell size to assume when an axis has only one
    sample.
    fallback: use pcolor when the padded coordinates contain NaNs
    (pcolormesh cannot handle them).
    """
    Xe,Ye=utils.center_to_edge_2d(X,Y,dx_single=dx_single,dy_single=dy_single)
    ax=ax or plt.gca()
    have_nan=np.isnan(Xe).any() or np.isnan(Ye).any()
    if fallback and have_nan:
        return ax.pcolor(Xe,Ye,Z,**kwargs)
    return ax.pcolormesh(Xe,Ye,Z,**kwargs)
def show_slopes(ax=None,slopes=[-5./3,-1],xfac=5,yfac=3):
    """Draw short gray log-log reference lines of the given slopes,
    each labeled with its slope value.

    NOTE(review): duplicate of an identical definition earlier in this
    module; this later definition is the one in effect after import.
    """
    ax = ax or plt.gca()
    x = np.median( [l.get_xdata()[-1] for l in ax.lines] )
    y = np.max( [l.get_ydata()[-1] for l in ax.lines] )
    y *= yfac # set the legend above the plotted lines
    xs = np.array([x/xfac,x])
    for s in slopes:
        ys = np.array([y/xfac**s,y])
        ax.loglog(xs,ys,c='0.5')
        plt.annotate("%g"%s,[xs[0],ys[0]])
# interactive log-log slope widget:
class Sloper(object):
    """Draggable reference line of fixed slope for log-log (or linear)
    axes: pick the line and release at a new location to translate it;
    the slope label follows.

    NOTE(review): when xlog != ylog no line is created and self.line
    is left unset -- confirm mixed axes are unsupported on purpose.
    """
    def __init__(self,ax=None,slope=-5./3,xfac=5,yfac=3,xlog=True,ylog=True,x=None,y=None):
        self.slope = slope
        self.ax = ax or plt.gca()
        # default anchor: near the ends of existing lines, scaled above them
        if x is None:
            x = np.median( [l.get_xdata()[-1] for l in self.ax.lines] )
        if y is None:
            y = np.max( [l.get_ydata()[-1] for l in self.ax.lines] )
            y *= yfac # set the legend above the plotted lines
        self.xlog = xlog
        self.ylog = ylog
        xs = np.array([x/xfac,x])
        ys = np.array([y/xfac**slope,y])
        if self.xlog and self.ylog:
            self.line = self.ax.loglog(xs,ys,c='0.5',picker=5)[0]
        elif not self.xlog and not self.ylog:
            self.line = self.ax.plot(xs,ys,c='0.5',picker=5)[0]
        self.text = self.ax.text(xs[0],1.5*ys[0],"%g"%self.slope,transform=self.ax.transData)
        self.ax.figure.canvas.mpl_connect('pick_event',self.onpick)
        self.drag = dict(cid=None,x=None,y=None)
    def onpick(self,event):
        # start of a drag: remember the grab point, hook the release event
        thisline = event.artist
        xdata = thisline.get_xdata()
        ydata = thisline.get_ydata()
        ind = event.ind
        print('onpick points:', list(zip(xdata[ind], ydata[ind])))
        print(' mouse point: ', event.mouseevent.xdata,event.mouseevent.ydata)
        cid = self.ax.figure.canvas.mpl_connect('button_release_event',self.drag_end)
        if self.drag['cid'] is not None:
            # cancel any stale drag state before starting a new one
            self.ax.figure.canvas.mpl_disconnect(self.drag['cid'])
        self.drag = dict(cid=cid,x=event.mouseevent.xdata,y=event.mouseevent.ydata)
    yoff = 1.5  # label offset from line start: factor if ylog, additive if not
    def update_text_pos(self):
        # keep the slope label attached to the first point of the line
        x = self.line.get_xdata()[0]
        y = self.line.get_ydata()[0]
        self.text.set_x(x)
        if self.ylog:
            self.text.set_y(self.yoff*y)
        else:
            self.text.set_y(self.yoff+y)
    def drag_end(self,event):
        # finish a drag: translate the line by the mouse displacement,
        # multiplicatively on log axes, additively on linear axes
        print("drag end")
        self.ax.figure.canvas.mpl_disconnect(self.drag['cid'])
        xdata = self.line.get_xdata()
        ydata = self.line.get_ydata()
        if self.xlog:
            xdata *= event.xdata / self.drag['x']
        else:
            xdata += (event.xdata - self.drag['x'])
        if self.ylog:
            ydata *= event.ydata / self.drag['y']
        else:
            ydata += event.ydata - self.drag['y']
        self.line.set_xdata(xdata)
        self.line.set_ydata(ydata)
        self.update_text_pos()
        event.canvas.draw()
def function_contours(f=lambda x,y: x-y,ax=None,Nx=20,Ny=20,V=10,
                      fmt=None):
    """ Cheap way to draw contours of a function and label them.
    Just evaluates the function on an Nx x Ny grid spanning the current
    axis extents and calls contour.

    f: callable f(X,Y), vectorized over arrays.
    V: contour levels (count or explicit values), passed to contour.
    fmt: if given, label the contours with this format.
    """
    ax = ax or plt.gca()
    xxyy = ax.axis()
    x = np.linspace(xxyy[0],xxyy[1],Nx)
    y = np.linspace(xxyy[2],xxyy[3],Ny)
    X,Y = np.meshgrid(x,y)
    Z = f(X,Y)
    # BUG FIX: draw on the requested axes -- plt.contour targeted the
    # *current* axes even when ax was supplied.
    ctr = ax.contour(X,Y,Z,V,colors='k')
    if fmt:
        ax.clabel(ctr,fmt=fmt)
    return ctr
def bode(G,f=np.arange(.01,100,.01),fig=None):
    """Plot magnitude (dB) and phase (deg) of a transfer function G
    over frequencies f; G must expose .num/.den polynomial coefficients.

    Returns (mag, phase).
    NOTE(review): plt.subplot draws into the *current* figure, so a
    caller-supplied fig that is not current is not actually used --
    confirm intended.
    """
    fig = fig or plt.figure()
    jw = 2*np.pi*f*1j
    # evaluate G on the imaginary axis
    y = np.polyval(G.num, jw) / np.polyval(G.den, jw)
    mag = 20.0*np.log10(abs(y))
    phase = np.arctan2(y.imag, y.real)*180.0/np.pi % 360
    plt.subplot(211)
    #plt.semilogx(jw.imag, mag)
    plt.semilogx(f,mag)
    plt.grid()
    plt.gca().xaxis.grid(True, which='minor')
    plt.ylabel(r'Magnitude (db)')
    plt.subplot(212)
    #plt.semilogx(jw.imag, phase)
    plt.semilogx(f,phase)
    plt.grid()
    plt.gca().xaxis.grid(True, which='minor')
    plt.ylabel(r'Phase (deg)')
    plt.yticks(np.arange(0, phase.min()-30, -30))
    return mag, phase
# Courtesy http://stackoverflow.com/questions/41597177/get-aspect-ratio-of-axes
# Mad Physicist
from operator import sub
def get_aspect(ax):
    """Return the effective aspect ratio of an axes: the ratio of its
    display-unit proportions to its data-unit proportions."""
    # Total figure size
    fig_w, fig_h = ax.get_figure().get_size_inches()
    # Axes extent as fractions of the figure
    _, _, ax_w, ax_h = ax.get_position().bounds
    disp_ratio = (fig_h * ax_h) / (fig_w * ax_w)
    # Equivalent ratio measured in data units
    y0, y1 = ax.get_ylim()
    x0, x1 = ax.get_xlim()
    data_ratio = (y1 - y0) / (x1 - x0)
    return disp_ratio / data_ratio
def annotate_line(l,s,norm_position=0.5,offset_points=10,ax=None,
                  buff=None,**kwargs):
    """
    line: a matplotlib line object
    s: string to show
    norm_position: where along the line, normalized to [0,1]
    offset_points: how to offset the text baseline relative to the line.
    buff: options to draw a buffer around text. foreground, linewidth
    """
    ax=ax or plt.gca()
    x,y = l.get_data()
    # cumulative arc length along the line, for normalized positioning
    deltas = np.sqrt(np.diff(x)**2 + np.diff(y)**2)
    deltas = np.concatenate( ([0],deltas) )
    dists = np.cumsum(deltas)
    abs_position = norm_position*dists[-1]
    # a second nearby point gives the local direction of the line
    if norm_position < 0.99:
        abs_position2 = (norm_position+0.01)*dists[-1]
    else:
        abs_position2 = (norm_position-0.01)*dists[-1]
    x_of_label = np.interp(abs_position,dists,x)
    y_of_label = np.interp(abs_position,dists,y)
    asp=get_aspect(ax)
    dx = np.interp(abs_position2,dists,x) - x_of_label
    dy = np.interp(abs_position2,dists,y) - y_of_label
    # rotate the text to follow the on-screen (aspect-corrected) slope
    angle = np.arctan2(asp*dy,dx)*180/np.pi
    # offset perpendicular to the line, in points
    perp = np.array([-dy,dx])
    perp = offset_points * perp / utils.mag(perp)
    settings=dict(xytext=perp, textcoords='offset points',
                  rotation=angle,xycoords=l.get_transform(),
                  ha='center',va='center')
    settings.update(kwargs)
    if buff is not None:
        settings['path_effects']=[pe.withStroke(**buff)]
    t=ax.annotate(s,[x_of_label,y_of_label],**settings)
    return t
def klabel(k,txt,color='0.5',ax=None,y=None,**kwargs):
    """Draw a labeled vertical line at x=k.

    y is in axes-fraction units; when omitted a random height is
    chosen so neighboring labels are unlikely to collide.
    Returns the Text artist.
    """
    ax = ax or plt.gca()
    ax.axvline(k, color=color)
    text_kwargs = dict(rotation=90, va='bottom', ha='right', color=color)
    text_kwargs.update(kwargs)
    if y is None:
        y = 0.02 + 0.6 * np.random.random()
    return ax.text(k, y, txt,
                   transform=ax.get_xaxis_transform(),
                   **text_kwargs)
class ScaleXOnly(Transform):
    """ Given a base transform, the x origin is preserved
    but x scaling is taken from a second transformation.
    An optional xoffset is applied to the origin.
    Useful for having a scalebar with one side in a consistent
    location in ax.transAxes coordinates, but the length of
    the bar adjusts with the data transformation.
    """
    is_separable=True
    input_dims = 2
    output_dims = 2
    pass_through=True # ?
    def __init__(self,origin,xscale,xoffset=None,**kwargs):
        Transform.__init__(self, **kwargs)
        self._origin=origin
        self._xscale=xscale or 0.0
        self._xoffset=xoffset
        # NOTE(review): if xscale is falsy the line above stores 0.0 and
        # these asserts then fail -- xscale is effectively required.
        assert(xscale.is_affine)
        assert(xscale.is_separable)
        self.set_children(origin,xscale)
        self._affine = None
    def __eq__(self, other):
        if isinstance(other, ScaleXOnly):
            return (self._origin == other._origin) and (self._xscale==other._xscale) and \
                (self._xoffset == other._xoffset)
        else:
            return NotImplemented
    def contains_branch_seperately(self, transform):
        # x is satisfied by either parent; y only by the origin transform
        x,y=self._origin.contains_branch_seperately(transform)
        xs,ys=self._xscale.contains_branch_seperately(transform)
        return (x or xs,y)
    @property
    def depth(self):
        return max([self._origin.depth,self._xscale.depth])
    def contains_branch(self, other):
        # ??
        # a blended transform cannot possibly contain a branch from two different transforms.
        return False
    @property
    def is_affine(self):
        # scale is always affine
        return self._origin.is_affine
    def frozen(self):
        return ScaleXOnly(self._origin.frozen(),self._xscale.frozen(),self._xoffset)
    def __repr__(self):
        # BUG FIX: the old single-'%s' format string raised TypeError
        # when given a 3-tuple of arguments.
        return "ScaleXOnly(%s,%s,xoffset=%s)" % (self._origin,self._xscale,self._xoffset)
    def transform_non_affine(self, points):
        if self._origin.is_affine:
            return points
        else:
            return self._origin.transform_non_affine(points)
    # skip inversion for now.
    def get_affine(self):
        # Compose: origin's affine, with its x scaling replaced by
        # xscale's and xoffset folded into the x translation.
        if self._invalid or self._affine is None:
            mtx = self._origin.get_affine().get_matrix()
            mtx_xs = self._xscale.get_affine().get_matrix()
            # 3x3
            # x transform is the first row
            mtx=mtx.copy()
            mtx[0,2] += mtx[0,0]*self._xoffset # x offset in the origin transform
            mtx[0,0]=mtx_xs[0,0] # overrides scaling of x
            self._affine = Affine2D(mtx)
            self._invalid = 0
        return self._affine
def right_align(axs):
    """Shrink each axes so all right edges line up at the leftmost one."""
    common_right = min(a.get_position().xmax for a in axs)
    for a in axs:
        box = a.get_position()
        box.p1[0] = common_right  # move the right edge in place
        a.set_position(box)
def cbar(*args,**kws):
    """plt.colorbar wrapper that also enables interactive limit editing.

    Extra keywords: extras (additional mappables to renorm together),
    sym (adjust both limits symmetrically).
    """
    extra_mappables = kws.pop('extras', [])
    sym = kws.pop('sym', False)
    result = plt.colorbar(*args, **kws)
    cbar_interactive(result, extras=extra_mappables, symmetric=sym)
    return result
def cbar_interactive(cbar,extras=[],symmetric=False):
    """ click in the upper or lower end of the colorbar to
    adjust the respective limit.
    left click to increase, right click to decrease

    extras: additional mappables. When the norm is changed,
    these will get set_norm(norm) called.

    returns the callback. There is the possibility of callbacks being
    garbage collected if a reference is not retained, so it is
    recommended to keep the returned callback; it will attempt to
    remove itself if the colorbar artists disappear from the cax.
    """
    mappables=[cbar.mappable] + extras
    original_clim=[cbar.mappable.norm.vmin,cbar.mappable.norm.vmax]
    def mod_norm(rel_min=0,rel_max=0,reset=False):
        # shift either limit by a fraction of the current range, or
        # restore the limits captured at setup time
        nrm=cbar.mappable.norm
        if reset:
            nrm.vmin,nrm.vmax = original_clim
        else:
            rang=nrm.vmax - nrm.vmin
            nrm.vmax += rel_max*rang
            nrm.vmin += rel_min*rang
        [m.set_norm(nrm) for m in mappables]
        plt.draw()
    cid=None
    def cb_u_cbar(event=None):
        # self-remove once the colorbar's artists are gone
        if event is None or \
           (cbar.solids is not None and \
            cbar.solids not in cbar.ax.get_children()):
            # print "This cbar is no longer relevant. Removing callback %s"%cid
            fig.canvas.mpl_disconnect(cid)
            return
        if event.inaxes is cbar.ax:
            if cbar.orientation=='vertical':
                coord=event.ydata
            else:
                coord=event.xdata
            # left button expands, right button contracts
            if event.button==1:
                rel=0.1
            elif event.button==3:
                rel=-0.1
            if symmetric:
                mod_norm(rel_min=rel,rel_max=-rel)
            else:
                # ends adjust the near limit; the middle resets
                if coord<0.4:
                    mod_norm(rel_min=rel)
                elif coord>0.6:
                    mod_norm(rel_max=rel)
                else:
                    mod_norm(reset=True)
    fig=cbar.ax.figure
    cid=fig.canvas.mpl_connect('button_press_event',cb_u_cbar)
    return cb_u_cbar
def rgb_key(vel_scale,ax):
    """Draw a circular color key for vec_to_rgb into ax: hue encodes
    direction, saturation encodes magnitude up to vel_scale."""
    # key for vel_rgb
    # syn_Mag,syn_Dir are for quad corners -
    syn_mag=syn_dir=np.linspace(0,1,20)
    syn_Mag,syn_Dir=np.meshgrid(syn_mag,syn_dir)
    # syn_cMag, syn_cDir are for quad centers
    syn_cMag,syn_cDir=np.meshgrid( 0.5*(syn_mag[1:]+syn_mag[:-1]),
                                   0.5*(syn_dir[1:]+syn_dir[:-1]) )
    # synthetic velocity field covering all directions/magnitudes
    syn_u=vel_scale*syn_cMag*np.cos(syn_cDir*2*np.pi)
    syn_v=vel_scale*syn_cMag*np.sin(syn_cDir*2*np.pi)
    syn_rgb=vec_to_rgb(syn_u,syn_v,vel_scale)
    # not sure why Y has to be negated..
    syn_X=syn_Mag*np.cos(syn_Dir*2*np.pi)
    syn_Y=syn_Mag*np.sin(syn_Dir*2*np.pi)
    ax.cla()
    # per-quad RGBA facecolors: duplicate a channel then overwrite alpha
    rgb_ravel=syn_rgb.reshape( [-1,3] )
    rgba_ravel=rgb_ravel[ :, [0,1,2,2] ]
    rgba_ravel[:,3]=1.0
    coll=ax.pcolormesh( syn_X,syn_Y,syn_rgb[:,:,0],
                        facecolors=rgba_ravel)
    coll.set_array(None)
    coll.set_facecolors(rgba_ravel)
    ax.xaxis.set_visible(0)
    ax.yaxis.set_visible(0)
    ax.set_frame_on(0)
    ax.text(0.5,0.5,'max %g'%vel_scale,transform=ax.transAxes)
    ax.axis('equal')
def vec_to_rgb(U,V,scale):
    """Map 2D vectors to RGB: direction sets hue, magnitude sets
    saturation (full saturation at |vec|=scale, clipped above).

    U,V: 2D arrays of vector components; non-finite vectors come out
    mid-gray. Returns an array of shape U.shape + (3,).
    """
    hue = (np.arctan2(V, U) / (2 * np.pi)) % 1.0
    sat = (np.sqrt(U**2 + V**2) / scale).clip(0, 1)
    val = 1.0 * np.ones_like(hue)
    # flag non-finite inputs and render them as neutral gray
    invalid = ~(np.isfinite(U) & np.isfinite(V))
    hue[invalid] = 0
    sat[invalid] = 0
    val[invalid] = 0.5
    # standard HSV->RGB: sector index and fractional position within it
    sector = (hue * 6.0).astype(int)
    frac = hue * 6.0 - sector
    p = val * (1.0 - sat)
    q = val * (1.0 - sat * frac)
    t = val * (1.0 - sat * (1.0 - frac))
    red = np.choose(sector, (val, q, p, p, t, val, 0.5))
    green = np.choose(sector, (t, val, val, q, p, p, 0.5))
    blue = np.choose(sector, (p, p, t, val, val, q, 0.5))
    # not portable to other shapes...
    return np.asarray([red, green, blue]).transpose(1, 2, 0)
def savefig_geo(fig,fn,*args,**kws):
    """Save a figure and write a matching world file (fn + 'w') so the
    image can be georeferenced, using the first axes' data extents."""
    # Not really tested...
    # Add annotations for the frontal zones:
    from PIL import Image
    fig.savefig(fn,*args,**kws)
    # get the image resolution:
    img_size=Image.open(fn).size
    w_fn=fn+'w'
    xxyy=fig.axes[0].axis()
    # pixel sizes in data units; world files use a negative y pixel size
    xpix=(xxyy[1] - xxyy[0]) / img_size[0]
    ypix=(xxyy[3] - xxyy[2]) / img_size[1]
    with open(w_fn,'wt') as fp:
        for v in [xpix,0,0,-ypix,xxyy[0],xxyy[3]]:
            fp.write("%f\n"%v)
# Transect methods:
def transect_tricontourf(*args,
                         **kwargs):
    """Filled-contour plot of a transect; thin wrapper around
    transect_tricontour_gen(style='fill').

    xcoord, ycoord: name of the respective coordinate variables.
    positive_down: how to handle a ycoord with a positive:down attribute.
      "negate" -- negate the sign of the coordinate
      "flip" -- reverse the axis in matplotlib
      "none" -- ignore
    """
    return transect_tricontour_gen(*args, style='fill', **kwargs)
def transect_tricontour(*args,
                        **kwargs):
    """Line-contour plot of a transect; thin wrapper around
    transect_tricontour_gen(style='line').

    xcoord, ycoord: name of the respective coordinate variables.
    positive_down: how to handle a ycoord with a positive:down attribute.
      "negate" -- negate the sign of the coordinate
      "flip" -- reverse the axis in matplotlib
      "none" -- ignore
    """
    return transect_tricontour_gen(*args, style='line', **kwargs)
def transect_tricontour_gen(data,xcoord,ycoord,V=None,
                            elide_missing_columns=True,sortx=True,
                            positive_down='negate',style='fill',
                            **kwargs):
    """
    xcoord, ycoord: name of the respective coordinate variables.
    positive_down: how to handle a ycoord with a positive:down attribute.
      "negate" -- negate the sign of the coordinate
      "flip" -- reverse the axis in matplotlib
      "none" -- ignore
    style: 'fill' for contourf, 'line' for contour

    data: 2D xarray DataArray (horizontal dimension first).
    V: contour levels, passed through to contour/contourf.
    Returns the contour set, or None if no triangles could be formed.
    """
    ax=kwargs.pop('ax',None)
    if ax is None:
        ax=plt.gca()
    y_scale=1
    do_flip=False
    if data[ycoord].attrs.get('positive',"")=='down':
        if positive_down=='negate':
            y_scale=-1
        elif positive_down=='flip':
            do_flip=True
    tri,mapper = transect_to_triangles(data,xcoord,ycoord,
                                       elide_missing_columns=elide_missing_columns,
                                       y_scale=y_scale,
                                       sortx=sortx)
    if tri is None:
        return None  # no valid data to triangulate
    if V is not None:
        args=[V]
    else:
        args=[]
    if style=='fill':
        coll=ax.tricontourf(tri,mapper(data.values),*args,**kwargs)
    elif style=='line':
        coll=ax.tricontour(tri,mapper(data.values),*args,**kwargs)
    else:
        raise Exception("Unknown style %s"%style)
    # Seems that mpl does not autoscale for contoursets.
    # NOTE(review): this autoscaling indexes data[xcoord]/data[ycoord],
    # so it assumes xcoord/ycoord are coordinate *names*, not arrays --
    # confirm against transect_to_triangles, which accepts both.
    xmin,xmax,ymin,ymax=ax.axis()
    if ymin>ymax:
        do_flip=True
        ymin,ymax=ymax,ymin
    if True: # ax.get_autoscalex_on():
        xmin=min(xmin,data[xcoord].min())
        xmax=max(xmax,data[xcoord].max())
    if True: # ax.get_autoscaley_on():
        ymin=min(ymin,(y_scale*data[ycoord]).min())
        ymax=max(ymax,(y_scale*data[ycoord]).max())
    if do_flip:
        ymin,ymax=ymax,ymin
    ax.axis(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax)
    return coll
def transect_to_triangles(data,xcoord,ycoord,
                          elide_missing_columns=True,
                          sortx=True,y_scale=1.0):
    """
    data: xarray DataArray with two dimensions, first assumed to be
    horizontal, second vertical.
    xcoord,ycoord: each 1D or 2D, to be expanded as needed.
    Can be either the name of the respective coordinate in data,
    or an array to be used directly.
    elide_missing: if True, a first step drops columns (profiles) for
    which there is no valid data. Otherwise, these columns will be
    shown as blanks. Note that missing columns at the beginning or
    end are not affected by this
    sortx: force x coordinate to be sorted low to high.
    y_scale: apply scaling factor, possibly negative, to y coordinate.
    (useful for flipping soundings vs. elevations)

    return (triangulation,mapper)
    such that tricontourf(triangulation,mapper(data.values))
    generates a transect plot
    Returns (None,None) when no triangles can be formed.

    triangles are omitted based on entries in data being nan.
    """
    xdim,ydim=data.dims # assumption of the order!
    # conditional meshgrid, more or less.
    if isinstance(xcoord,string_types):
        X=data[xcoord].values
    else:
        X=xcoord
    if isinstance(ycoord,string_types):
        Y=data[ycoord].values
    else:
        Y=ycoord
    # broadcast 1D coordinates up to the 2D shape of the data
    if X.ndim==1:
        X=X[:,None] * np.ones_like(data.values)
    if Y.ndim==1:
        Y=Y[None,:] * np.ones_like(data.values)
    Y=y_scale*Y
    # build up a triangulation and mapping for better display
    # assumed the triangles should be formed only between consecutive
    triangles=[] # each triangle is defined by a triple of index pairs
    data_vals=data.values
    valid=np.isfinite(data_vals) # or consult mask
    xslice=np.arange(X.shape[0])
    if sortx:
        xslice=xslice[np.argsort(X[:,0])]
    if elide_missing_columns:
        valid_cols=np.any(valid[xslice,:],axis=1)
        xslice=xslice[valid_cols]
    # now xslice is an index array, in the right order, with
    # the right subset.
    X=X[xslice,:]
    Y=Y[xslice,:]
    data_vals=data_vals[xslice,:]
    valid=valid[xslice,:]
    # march down each adjacent pair of columns, stitching triangles
    # between the valid samples on the left (xi) and right (xi+1) sides
    for xi in range(data_vals.shape[0]-1):
        # initialize left_y and right_y to first valid datapoints
        for left_y in range(data_vals.shape[1]):
            if valid[xi,left_y]:
                break
        else:
            continue
        for right_y in range(data_vals.shape[1]):
            if valid[xi+1,right_y]:
                break
        else:
            continue
        for yi in range(1+min(left_y,right_y),data_vals.shape[1]):
            if (yi>left_y) and valid[xi,yi]:
                triangles.append( [[xi,left_y],[xi,yi],[xi+1,right_y]] )
                left_y=yi
            if valid[xi+1,yi]:
                triangles.append( [[xi,left_y],[xi+1,yi],[xi+1,right_y]] )
                right_y=yi
    if len(triangles)==0:
        return None,None # triangles=np.zeros((0,3),'i4')
    else:
        triangles=np.array(triangles)
    nx=X.ravel()
    ny=Y.ravel()
    # collapse (column,row) index pairs to flat node indices
    ntris=triangles[...,0]*X.shape[1] + triangles[...,1]
    tri=Triangulation(nx, ny, triangles=ntris)
    def trimap(data2d,xslice=xslice):
        # reorder/subset a 2D field to line up with the triangulation nodes
        rav=data2d[xslice,:].ravel().copy()
        # some tri functions like contourf don't deal well with
        # nan values.
        invalid=np.isnan(rav)
        rav[invalid]=-999
        rav=np.ma.array(rav,mask=invalid)
        return rav
    return tri,trimap
def inset_location(inset_ax, overview_ax):
    """Outline the view extents of ``inset_ax`` on ``overview_ax``.

    Draws an unfilled black rectangle on the overview axes covering the
    current x/y limits of the inset axes, and returns that patch so the
    caller can restyle or remove it later.
    """
    xmin, xmax, ymin, ymax = inset_ax.axis()
    outline = Rectangle([xmin, ymin],
                        width=xmax - xmin,
                        height=ymax - ymin,
                        lw=0.5, ec='k', fc='none')
    overview_ax.add_patch(outline)
    return outline
class RotatedPathCollection(collections.PathCollection):
    """A PathCollection whose markers can be individually rotated.

    Like PathCollection, but each offset carries a rotation angle
    (radians, counter-clockwise) that is applied on top of the usual
    per-marker size scaling.
    """
    _factor = 1.0

    def __init__(self, paths, sizes=None, angles=None, **kwargs):
        """
        *paths* is a sequence of :class:`matplotlib.path.Path`
        instances.

        *angles* is an optional sequence of rotation angles in radians,
        broadcast against *sizes*.

        %(Collection)s
        """
        collections.PathCollection.__init__(self, paths, sizes, **kwargs)
        self.set_angles(angles)
        self.stale = True

    def set_angles(self, angles):
        # Record the angles and fold a rotation into each entry of
        # self._transforms (which set_sizes() populates from sizes).
        if angles is None:
            self._angles = np.array([])
        else:
            self._angles = np.asarray(angles)

        orig_trans = self._transforms
        new_angles = self._angles
        if len(self._transforms) == 1 and len(self._angles) == 1:
            pass
        else:
            # Broadcast: repeat whichever of transforms/angles is scalar.
            # (Fixed: previously wrapped the whole array in a list instead
            # of repeating its single element, and referenced the
            # nonexistent attribute `self.transforms`.)
            if len(self._transforms) == 1:
                orig_trans = [self._transforms[0]] * len(self._angles)
            if len(self._angles) == 1:
                orig_trans = self._transforms
                new_angles = [self._angles[0]] * len(self._transforms)
        self._transforms = [
            transforms.Affine2D(x).rotate(angle).get_matrix()
            for x, angle in zip(orig_trans, new_angles)
        ]
        self.stale = True

    def draw(self, renderer):
        # set_sizes() rebuilds _transforms from scratch, so the rotations
        # must be layered back on before delegating to Collection.draw.
        self.set_sizes(self._sizes, self.figure.dpi)
        self.set_angles(self._angles)
        collections.Collection.draw(self, renderer)
def fat_quiver(X, Y, U, V, ax=None, **kwargs):
    """Quiver-like vector plot drawn with fat arrow markers.

    Marker *area* scales with vector magnitude (so the linear dimension
    goes like sqrt(magnitude)), and each marker is rotated to its
    vector's direction.

    Parameters
    ----------
    X, Y : arrow locations, in data coordinates.
    U, V : vector components.
    ax : target axes; defaults to ``plt.gca()``.
    **kwargs : passed to RotatedPathCollection.  Recognized extras:
        pivot: 'tip', 'tail' (default), or 'middle'/'center'.
        scale: divides the marker area.
        color: shorthand for facecolor.

    Returns
    -------
    The RotatedPathCollection that was added to the axes.
    """
    U = np.asarray(U)
    V = np.asarray(V)
    # (Fixed: mags/angles were previously computed twice; the first pair
    # of results was discarded.)
    mags = np.sqrt(U**2 + V**2)
    angles = np.arctan2(V, U)
    ax = ax or plt.gca()

    # Right-facing fat arrow prototype, dimensionless geometry:
    L = 0.333    # shaft length
    Hw = 0.1667  # head half-width
    Sw = 0.0833  # shaft half-width
    Hl = 0.1667  # head length
    pivot = kwargs.pop('pivot', 'tail')
    if pivot == 'tip':
        dx = 0
    elif pivot == 'tail':
        dx = L + Hl
    elif pivot in ('middle', 'center'):
        dx = (L + Hl) / 2.0
    else:
        # Previously an unrecognized pivot left dx unbound -> NameError.
        raise ValueError("pivot must be 'tip', 'tail', 'middle' or 'center'")
    marker = (path.Path(np.array([[dx + 0, 0],
                                  [dx - Hl, -Hw],
                                  [dx - Hl, -Sw],
                                  [dx - Hl - L, -Sw],
                                  [dx - Hl - L, Sw],
                                  [dx - Hl, Sw],
                                  [dx - Hl, Hw],
                                  [dx + 0, 0]]), None),)
    scale = kwargs.pop('scale', 1)
    if 'color' in kwargs and 'facecolor' not in kwargs:
        # otherwise sets edges and faces
        kwargs['facecolor'] = kwargs.pop('color')
    # sizes is interpreted as an area - i.e. the linear scale
    # is sqrt(size)
    coll = RotatedPathCollection(paths=marker,
                                 offsets=np.array([X, Y]).T,
                                 sizes=mags * 50000 / scale,
                                 angles=angles,
                                 transOffset=ax.transData,
                                 **kwargs)
    trans = transforms.Affine2D().scale(ax.figure.dpi / 72.0)
    coll.set_transform(trans)  # the points to pixels transform
    ax.add_collection(coll)
    return coll
def reduce_text_overlap(ax, max_iter=200):
    """
    Try to shift the texts in an axes to avoid overlaps.

    Iteratively pushes pairs of overlapping text bounding boxes apart in
    pixel space, then maps the accumulated pixel offsets back through
    each text's own transform and moves the text.

    ax: axes whose Text artists will be nudged.
    max_iter: maximum number of relaxation iterations.

    Returns an (n_texts, 2) array of the (x, y) shift applied to each
    text, in each text's own coordinate system.
    """
    texts = ax.texts
    # in scripts window extents may not be available until a draw
    # is forced
    try:
        texts[0].get_window_extent()
    except RuntimeError:
        # so force it.
        ax.get_figure().canvas.draw()
    # Collect pixel-space bounding boxes, shape (n_texts, 2, 2):
    # [[xmin, ymin], [xmax, ymax]] per text.
    bboxes = []
    for txt in texts:
        ext = txt.get_window_extent()
        bboxes.append([[ext.xmin, ext.ymin],
                       [ext.xmax, ext.ymax]])
    bboxes = np.array(bboxes)
    # each iteration move overlapping texts by about this
    # much (pixels)
    dx = 2.0
    # come up with pixel offsets for each label.
    pix_offsets = np.zeros((len(texts), 1, 2), np.float64)
    for _ in range(max_iter):
        changed = False
        new_bboxes = bboxes + pix_offsets
        # Pairwise overlap test on the offset boxes.
        for a in range(len(texts)):
            for b in range(a + 1, len(texts)):
                # is there any force between these two labels?
                # check overlap: intersection box is [int_min, int_max]
                int_min = np.maximum(new_bboxes[a, 0, :], new_bboxes[b, 0, :])
                int_max = np.minimum(new_bboxes[a, 1, :], new_bboxes[b, 1, :])
                if np.all(int_min < int_max):
                    #print("Collision %s - %s"%(texts[a].get_text(),
                    #                           texts[b].get_text()))
                    # This could probably be faster and less verbose.
                    # separate axis is taken from the overlapping region
                    # and direction
                    # choose the direction that most quickly eliminates the overlap
                    # area. could also just choose the least overlapping direction
                    # NOTE(review): utils.to_unit presumably normalizes to a
                    # unit vector -- confirm against the utils module.
                    opt = utils.to_unit(np.array([int_max[1] - int_min[1],
                                                  int_max[0] - int_min[0]]))
                    ab = new_bboxes[b].mean(axis=0) - new_bboxes[a].mean(axis=0)
                    # Orient the push so a and b move apart, half each.
                    if np.dot(opt, ab) < 0:
                        opt *= -1
                    pix_offsets[a, 0, :] -= dx * opt / 2
                    pix_offsets[b, 0, :] += dx * opt / 2
                    changed = True
        if not changed:
            break
    # Update positions of the texts:
    deltas = np.zeros((len(texts), 2), np.float64)
    for i in range(len(texts)):
        txt = texts[i]
        xform = txt.get_transform()
        ixform = xform.inverted()
        # Convert the pixel offset into the text's own coordinates by
        # differencing two inverse-transformed points.
        p = bboxes[i, 0, :]
        pos0 = ixform.transform_point(p)
        pos_new = ixform.transform_point(p + pix_offsets[i, 0, :])
        deltas[i] = delta = pos_new - pos0
        txt.set_position(np.array(txt.get_position()) + delta)
    return deltas
| mit |
davidsoncasey/quiver-server | plot_equation.py | 1 | 3888 | from __future__ import division
import re
from math import sqrt
import multiprocessing
import Queue
import sympy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
class DiffEquation(object):
    '''
    Class that contains equation information and, if the equation is valid,
    prepares the plot of the direction field for dy/dx = f(x, y).
    '''

    def __init__(self, equation_string):
        self.equation_string = equation_string
        self.equation = None      # sympy expression, set by prep_equation()
        self.compute_func = None  # fast numeric callable f(x, y), set by prep_equation()
        self.figure = None        # matplotlib figure, set by make_plot()

    def regex_check(self):
        '''A quick regular expression check to see that the input resembles an equation.

        Accepts only x, y, digits, arithmetic characters and the
        whitelisted functions sin/cos/exp/log, and rejects two variables
        juxtaposed (e.g. "x y"), which sympify would misread.
        '''
        match1 = re.match('^(([xy+\-*/()0-9. ]+|sin\(|cos\(|exp\(|log\()?)+$', self.equation_string)
        match2 = re.match('^.*([xy]) *([xy]).*$', self.equation_string)
        return bool(match1) and not match2

    def prep_equation(self):
        '''
        Attempt to convert the string to a SymPy function.
        From there, use lambdify to generate a function that is efficient to compute
        numerically.

        Parsing runs in a subprocess with a timeout so a pathological
        input cannot hang the server.
        '''
        if not self.regex_check():
            return
        q = multiprocessing.Queue()

        def prep(q):
            # Runs in the child process: parse, then ship the result back.
            # (The parameter was previously misnamed `conn` and shadowed
            # by the closed-over queue.)
            try:
                q.put(sympy.sympify(self.equation_string))
            except sympy.SympifyError:
                q.put(None)

        p = multiprocessing.Process(target=prep, args=(q,))
        p.start()
        # See if we can get the equation within 3 seconds
        # (comment previously said 5 seconds; the timeout is 3)
        try:
            equation = q.get(timeout=3)
        except Queue.Empty:
            equation = None
        q.close()
        # If the process is still running, kill it
        if p.is_alive():
            p.terminate()
            p.join()
        if equation:
            self.equation = equation
            x, y = sympy.symbols('x,y')
            self.compute_func = sympy.utilities.lambdify((x, y), self.equation)

    def make_plot(self):
        '''Draw the direction-field plot on the figure attribute.'''
        if not self.compute_func:
            return
        xvals, yvals = np.arange(-10, 11, 1), np.arange(-10, 11, 1)
        X, Y = np.meshgrid(xvals, yvals)
        U, V = np.meshgrid(np.zeros(len(xvals)), np.zeros(len(yvals)))
        # Iterate through grid and compute function value at each point
        # If value cannot be computed, default to 0
        # If value can be computed, scale by sqrt of the magnitude
        for i, a in enumerate(xvals):
            for j, b in enumerate(yvals):
                dx = 1
                try:
                    dy = self.compute_func(a, b)
                    # (was sqrt(dx + dy**2); equal because dx == 1, but
                    # written out as the intended magnitude)
                    n = sqrt(dx**2 + dy**2)
                    dy /= sqrt(n)
                    dx /= sqrt(n)
                    U[j][i] = dx
                    V[j][i] = dy
                except (ValueError, ZeroDivisionError):
                    pass
        # Plot the values
        self.figure = plt.Figure()
        axes = self.figure.add_subplot(1, 1, 1)
        axes.quiver(X, Y, U, V, angles='xy', color='b', edgecolors=('k',))
        axes.axhline(color='black')
        axes.axvline(color='black')
        latex = sympy.latex(self.equation)
        axes.set_title(r'Direction field for $\frac{dy}{dx} = %s$' % latex, y=1.01)

    def write_data(self, output):
        '''Write the figure as PNG into *output* and return the raw bytes.

        Returns None if make_plot() has not produced a figure yet.
        '''
        if self.figure:
            # Attaching a canvas is required before the figure can render.
            canvas = FigureCanvas(self.figure)
            self.figure.savefig(output, format='png', bbox_inches='tight')
            output.seek(0)
            return output.getvalue()
        return None
| mit |
shernshiou/CarND | Term1/05-CarND-Vehicle-Detection/vehicle_detection.py | 1 | 3518 | import glob
import cv2
import numpy as np
import os
from util.draw import generate_sliding_windows
from util.draw import extract_heatmap
from util.classifier import svm_classifier
from util.classifier import transform_features
from sklearn.preprocessing import StandardScaler
from moviepy.editor import VideoFileClip
# Ring buffer of the last 10 per-frame detection heatmaps (frame size
# hard-coded to 720x1280); heatmap_idx is the next slot to overwrite.
# Both are mutated by video_pipeline() across frames.
heatmap_buffer = np.zeros((10,720,1280))
heatmap_idx = 0
def video_pipeline(img):
    """Detect vehicles in one RGB video frame.

    Slides windows over the frame, classifies each window with the SVM
    and scaler loaded at module run time in __main__ (`clf`, `X_scaler`),
    accumulates positive detections into a heatmap, and blends it with a
    rolling 10-frame average to suppress transient false positives.

    Returns the frame (RGB) with detected-vehicle regions drawn.
    """
    global heatmap_buffer
    global heatmap_idx
    # moviepy supplies RGB frames; the cv2-based helpers expect BGR.
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    # (Fixed: np.float is a deprecated/removed alias of builtin float.)
    heatmap = np.zeros_like(img[:,:,0]).astype(float)
    windows = generate_sliding_windows(img.shape)
    for bboxes in windows:
        for bbox in bboxes:
            # bbox is ((x1, y1), (x2, y2)); classifier expects 64x64 patches.
            roi = cv2.resize(img[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]], (64, 64))
            features = transform_features(roi, cspace='YUV', orient=9, pix_per_cell=8, cell_per_block=2, hog_channel='ALL')
            scaled_features = X_scaler.transform(features)
            prediction = clf.predict(scaled_features)[0]
            if prediction > 0:
                # cv2.rectangle(imgcopy, bbox[0], bbox[1], color=(0, 255, 0))
                heatmap[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]] += 1
    # Rolling average over the 10-frame ring buffer, weighted 30/70
    # toward history; low-confidence pixels are zeroed out.
    heatmap_buffer[heatmap_idx] = heatmap
    heatmap_idx = (heatmap_idx + 1) % 10
    avgheatmap = 0.3 * heatmap + 0.7 * np.mean(heatmap_buffer, axis=0)
    avgheatmap[avgheatmap <= 3] = 0 # Filter heatmap
    # heatmap = heatmap * 250 / 50
    # heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_HOT)
    extracted = extract_heatmap(img, avgheatmap)
    extracted = cv2.cvtColor(extracted, cv2.COLOR_BGR2RGB)
    return extracted
if __name__ == "__main__":
    # Batch mode: run the detector over the still test images, writing
    # annotated/heatmap outputs, then process the project video.
    images = glob.glob('./test_images/*.jpg')
    # Restore the feature scaler saved during training; clf and X_scaler
    # are module-level here and are also read by video_pipeline().
    scaler = np.load('scaler.npz')
    X_scaler = StandardScaler()
    X_scaler.mean_, X_scaler.scale_ = scaler['mean'], scaler['scale']
    clf = svm_classifier()
    for image in images:
        # cnt = 0
        filename = os.path.splitext(os.path.basename(image))[0]
        test_image = cv2.imread(image)
        imgcopy = np.copy(test_image)
        # NOTE(review): np.float is deprecated (removed in numpy 1.24);
        # builtin float is the drop-in replacement.
        heatmap = np.zeros_like(imgcopy[:,:,0]).astype(np.float)
        windows = generate_sliding_windows(test_image.shape)
        for bboxes in windows:
            for bbox in bboxes:
                # bbox is ((x1, y1), (x2, y2)); classify each 64x64 patch.
                roi = cv2.resize(test_image[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]], (64, 64))
                features = transform_features(roi, cspace='YUV', orient=9, pix_per_cell=8, cell_per_block=2, hog_channel='ALL')
                scaled_features = X_scaler.transform(features)
                prediction = clf.predict(scaled_features)[0]
                if prediction > 0:
                    cv2.rectangle(imgcopy, bbox[0], bbox[1], color=(0, 255, 0))
                    heatmap[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]] += 1
                    # cv2.imwrite('./output_images/' + filename + str(cnt) + '.jpg', roi)
                    # cnt += 1
        heatmap[heatmap <= 3] = 0 # Filter heatmap
        # Rescale to 0-255-ish range for the false-color map.
        heatmap = heatmap * 250 / 50
        heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_HOT)
        extracted = extract_heatmap(test_image, heatmap)
        cv2.imwrite('./output_images/' + filename + '_heat.jpg', heatmap)
        cv2.imwrite('./output_images/' + filename + '_extracted.jpg', extracted)
        cv2.imwrite('./output_images/' + filename + '.jpg', imgcopy)
    # Video
    clip = VideoFileClip('./project_video.mp4')
    output_clip = clip.fl_image(video_pipeline)
    output_clip.write_videofile('./output_images/project_video99.mp4', audio=False)
| mit |
vibhorag/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
    """numpy.distutils configuration for the sklearn ``metrics`` subpackage.

    Registers the ``pairwise_fast`` C extension, linked against the
    CBLAS libraries reported by get_blas_info().
    """
    config = Configuration("metrics", parent_package, top_path)
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        # libm must be linked explicitly on POSIX platforms.
        cblas_libs.append('m')
    # NOTE(review): blas_info's include dirs are appended as a nested
    # list element inside include_dirs -- numpy.distutils appears to
    # tolerate this; confirm before changing.
    config.add_extension("pairwise_fast",
                         sources=["pairwise_fast.c"],
                         include_dirs=[os.path.join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)
    return config
# Allow building this subpackage standalone: python setup.py build_ext
if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(**configuration().todict())
| bsd-3-clause |
BhallaLab/moose-full | moose-examples/snippets/MULTI/minchan.py | 3 | 12176 | # minimal.py ---
# Upi Bhalla, NCBS Bangalore 2014.
#
# Commentary:
#
# Minimal model for loading rdesigneur: reac-diff elec signaling in neurons
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
# Code:
import sys
sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import math
import numpy
import matplotlib.pyplot as plt
import moose
import proto18
EREST_ACT = -70e-3
def loadElec():
    """Build the channel prototype library and load the electrical cell model.

    Populates /library with the proto18 channel/conc prototypes that the
    cell file references, then loads mincell2.p into /model/elec.
    Returns the Id of the loaded cell model.
    """
    library = moose.Neutral( '/library' )
    moose.setCwe( '/library' )
    # Instantiate channel and Ca-concentration prototypes into /library.
    proto18.make_Ca()
    proto18.make_Ca_conc()
    proto18.make_K_AHP()
    proto18.make_K_C()
    proto18.make_Na()
    proto18.make_K_DR()
    proto18.make_K_A()
    proto18.make_glu()
    proto18.make_NMDA()
    proto18.make_Ca_NMDA()
    proto18.make_NMDA_Ca_conc()
    proto18.make_axon()
    moose.setCwe( '/library' )
    model = moose.Neutral( '/model' )
    cellId = moose.loadModel( 'mincell2.p', '/model/elec', "Neutral" )
    return cellId
def loadChem( diffLength ):
    """Load the chemical model and wire up the dend/spine/psd mesh hierarchy.

    NOTE(review): diffLength is currently unused here; the caller
    (makeNeuroMeshModel) assigns it to the mesh afterwards.
    """
    chem = moose.Neutral( '/model/chem' )
    neuroCompt = moose.NeuroMesh( '/model/chem/kinetics' )
    neuroCompt.separateSpines = 1
    neuroCompt.geometryPolicy = 'cylinder'
    spineCompt = moose.SpineMesh( '/model/chem/compartment_1' )
    # Spine mesh entries track the spine list published by the neuro mesh.
    moose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' )
    psdCompt = moose.PsdMesh( '/model/chem/compartment_2' )
    #print 'Meshvolume[neuro, spine, psd] = ', neuroCompt.mesh[0].volume, spineCompt.mesh[0].volume, psdCompt.mesh[0].volume
    moose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' )
    modelId = moose.loadModel( 'minimal.g', '/model/chem', 'ee' )
    # Rename the compartments to the names used throughout the rest of
    # the script.
    neuroCompt.name = 'dend'
    spineCompt.name = 'spine'
    psdCompt.name = 'psd'
def makeNeuroMeshModel():
    """Assemble the full multiscale model: electrical cell + chemistry.

    Loads the electrical and chemical models, maps the chemistry onto the
    cell geometry, installs Ksolve/Dsolve/Stoich solvers for the dend,
    spine and psd compartments, joins their diffusion solvers, and wires
    an Adaptor from the electrical Ca_conc to the chemical dend Ca pool.
    """
    diffLength = 6e-6 # Aim for 2 soma compartments.
    elec = loadElec()
    loadChem( diffLength )
    neuroCompt = moose.element( '/model/chem/dend' )
    neuroCompt.diffLength = diffLength
    neuroCompt.cellPortion( elec, '/model/elec/#' )
    # Assign diffusion constants: a default for every mobile pool, and a
    # faster one specifically for Ca.
    for x in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
        if (x.diffConst > 0):
            x.diffConst = 1e-11
    for x in moose.wildcardFind( '/model/chem/##/Ca' ):
        x.diffConst = 1e-10

    # Put in dend solvers
    ns = neuroCompt.numSegments
    ndc = neuroCompt.numDiffCompts
    print 'ns = ', ns, ', ndc = ', ndc
    assert( neuroCompt.numDiffCompts == neuroCompt.mesh.num )
    assert( ns == 1 ) # soma/dend only
    assert( ndc == 2 ) # split into 2.
    nmksolve = moose.Ksolve( '/model/chem/dend/ksolve' )
    nmdsolve = moose.Dsolve( '/model/chem/dend/dsolve' )
    nmstoich = moose.Stoich( '/model/chem/dend/stoich' )
    nmstoich.compartment = neuroCompt
    nmstoich.ksolve = nmksolve
    nmstoich.dsolve = nmdsolve
    # Setting path triggers the stoichiometry setup for the compartment.
    nmstoich.path = "/model/chem/dend/##"
    print 'done setting path, numPools = ', nmdsolve.numPools
    assert( nmdsolve.numPools == 1 )
    assert( nmdsolve.numAllVoxels == 2 )
    assert( nmstoich.numAllPools == 1 )
    # oddly, numLocalFields does not work.
    ca = moose.element( '/model/chem/dend/DEND/Ca' )
    assert( ca.numData == ndc )

    # Put in spine solvers. Note that these get info from the neuroCompt
    spineCompt = moose.element( '/model/chem/spine' )
    sdc = spineCompt.mesh.num
    print 'sdc = ', sdc
    assert( sdc == 1 )
    smksolve = moose.Ksolve( '/model/chem/spine/ksolve' )
    smdsolve = moose.Dsolve( '/model/chem/spine/dsolve' )
    smstoich = moose.Stoich( '/model/chem/spine/stoich' )
    smstoich.compartment = spineCompt
    smstoich.ksolve = smksolve
    smstoich.dsolve = smdsolve
    smstoich.path = "/model/chem/spine/##"
    assert( smstoich.numAllPools == 3 )
    assert( smdsolve.numPools == 3 )
    assert( smdsolve.numAllVoxels == 1 )

    # Put in PSD solvers. Note that these get info from the neuroCompt
    psdCompt = moose.element( '/model/chem/psd' )
    pdc = psdCompt.mesh.num
    assert( pdc == 1 )
    pmksolve = moose.Ksolve( '/model/chem/psd/ksolve' )
    pmdsolve = moose.Dsolve( '/model/chem/psd/dsolve' )
    pmstoich = moose.Stoich( '/model/chem/psd/stoich' )
    pmstoich.compartment = psdCompt
    pmstoich.ksolve = pmksolve
    pmstoich.dsolve = pmdsolve
    pmstoich.path = "/model/chem/psd/##"
    assert( pmstoich.numAllPools == 3 )
    assert( pmdsolve.numPools == 3 )
    assert( pmdsolve.numAllVoxels == 1 )
    foo = moose.element( '/model/chem/psd/Ca' )
    print 'PSD: numfoo = ', foo.numData
    print 'PSD: numAllVoxels = ', pmksolve.numAllVoxels

    # Put in junctions between the diffusion solvers
    nmdsolve.buildNeuroMeshJunctions( smdsolve, pmdsolve )

    """
    CaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' )
    print 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume
    CaNspine = moose.vec( '/model/chem/spine/SPINE/CaN_BULK/CaN' )
    print 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume
    """

    # set up adaptors: couple the electrical Ca_conc on the soma into the
    # chemical dend Ca pool (one adaptor entry per diffusion compartment).
    aCa = moose.Adaptor( '/model/chem/dend/DEND/adaptCa', ndc )
    adaptCa = moose.vec( '/model/chem/dend/DEND/adaptCa' )
    chemCa = moose.vec( '/model/chem/dend/DEND/Ca' )
    print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
    assert( len( adaptCa ) == ndc )
    assert( len( chemCa ) == ndc )
    path = '/model/elec/soma/Ca_conc'
    elecCa = moose.element( path )
    print "=========="
    print elecCa
    print adaptCa
    print chemCa
    moose.connect( elecCa, 'concOut', adaptCa[0], 'input', 'Single' )
    moose.connect( adaptCa, 'output', chemCa, 'setConc', 'OneToOne' )
    adaptCa.inputOffset = 0.0 #
    adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
    adaptCa.scale = 1e-3 # 520 to 0.0052 mM
    #print adaptCa.outputOffset
    #print adaptCa.scale
def addPlot( objpath, field, plot ):
    """Create a Table at /graphs/<plot> recording `field` of `objpath`.

    Returns the new Table, or the root element ('/') if objpath does not
    exist or is a bare Neutral (nothing meaningful to record).
    """
    #assert moose.exists( objpath )
    if moose.exists( objpath ):
        tab = moose.Table( '/graphs/' + plot )
        obj = moose.element( objpath )
        if obj.className == 'Neutral':
            print "addPlot failed: object is a Neutral: ", objpath
            return moose.element( '/' )
        else:
            #print "object was found: ", objpath, obj.className
            # Table pulls values from obj's getter each recording tick.
            moose.connect( tab, 'requestOut', obj, field )
            return tab
    else:
        print "addPlot failed: object not found: ", objpath
        return moose.element( '/' )
def makeElecPlots():
    """Set up recording tables for membrane potential and soma Ca."""
    graphs = moose.Neutral( '/graphs' )
    elec = moose.Neutral( '/graphs/elec' )
    addPlot( '/model/elec/soma', 'getVm', 'elec/somaVm' )
    addPlot( '/model/elec/spine_head', 'getVm', 'elec/spineVm' )
    addPlot( '/model/elec/soma/Ca_conc', 'getCa', 'elec/somaCa' )
def makeChemPlots():
    """Set up recording tables for the chemical Ca and Ca_CaM pools."""
    graphs = moose.Neutral( '/graphs' )
    chem = moose.Neutral( '/graphs/chem' )
    addPlot( '/model/chem/psd/Ca_CaM', 'getConc', 'chem/psdCaCam' )
    addPlot( '/model/chem/psd/Ca', 'getConc', 'chem/psdCa' )
    addPlot( '/model/chem/spine/Ca_CaM', 'getConc', 'chem/spineCaCam' )
    addPlot( '/model/chem/spine/Ca', 'getConc', 'chem/spineCa' )
    addPlot( '/model/chem/dend/DEND/Ca', 'getConc', 'chem/dendCa' )
def testNeuroMeshMultiscale():
    """Build, schedule, run and plot the multiscale demo model.

    Sets up the model, assigns clock ticks to the electrical (50 us) and
    chemical (10 ms) solvers, injects current into the spine head, runs
    for 0.5 s of model time and plots the recorded chem/elec tables.
    """
    elecDt = 50e-6
    chemDt = 0.01
    ePlotDt = 0.5e-3
    cPlotDt = 0.01
    # NOTE(review): plotName is never used in this function.
    plotName = 'nm.plot'

    makeNeuroMeshModel()
    print "after model is completely done"
    for i in moose.wildcardFind( '/model/chem/#/#/#/transloc#' ):
        print i[0].name, i[0].Kf, i[0].Kb, i[0].kf, i[0].kb
    # Disabled diagnostic dumps, kept for debugging:
    """
    for i in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
        if ( i[0].diffConst > 0 ):
            grandpaname = i.parent[0].parent.name + '/'
            paname = i.parent[0].name + '/'
            print grandpaname + paname + i[0].name, i[0].diffConst
    print 'Neighbors:'
    for t in moose.element( '/model/chem/spine/ksolve/junction' ).neighbors['masterJunction']:
        print 'masterJunction <-', t.path
    for t in moose.wildcardFind( '/model/chem/#/ksolve' ):
        k = moose.element( t[0] )
        print k.path + ' localVoxels=', k.numLocalVoxels, ', allVoxels= ', k.numAllVoxels
    """
    '''
    moose.useClock( 4, '/model/chem/dend/dsolve', 'process' )
    moose.useClock( 5, '/model/chem/dend/ksolve', 'process' )
    moose.useClock( 5, '/model/chem/spine/ksolve', 'process' )
    moose.useClock( 5, '/model/chem/psd/ksolve', 'process' )
    '''
    makeChemPlots()
    makeElecPlots()
    # Clocks 0-2: electrical; 4-6: chemical; 7-8: plot recording.
    moose.setClock( 0, elecDt )
    moose.setClock( 1, elecDt )
    moose.setClock( 2, elecDt )
    moose.setClock( 4, chemDt )
    moose.setClock( 5, chemDt )
    moose.setClock( 6, chemDt )
    moose.setClock( 7, cPlotDt )
    moose.setClock( 8, ePlotDt )
    moose.useClock( 0, '/model/elec/##[ISA=Compartment]', 'init' )
    moose.useClock( 1, '/model/elec/##[ISA=Compartment]', 'process' )
    moose.useClock( 1, '/model/elec/##[ISA=SpikeGen]', 'process' )
    moose.useClock( 2, '/model/elec/##[ISA=ChanBase],/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process')
    #moose.useClock( 5, '/model/chem/##[ISA=PoolBase],/model/##[ISA=ReacBase],/model/##[ISA=EnzBase]', 'process' )
    #moose.useClock( 4, '/model/chem/##[ISA=Adaptor]', 'process' )
    moose.useClock( 4, '/model/chem/#/dsolve', 'process' )
    moose.useClock( 5, '/model/chem/#/ksolve', 'process' )
    moose.useClock( 6, '/model/chem/dend/DEND/adaptCa', 'process' )
    moose.useClock( 7, '/graphs/chem/#', 'process' )
    moose.useClock( 8, '/graphs/elec/#', 'process' )
    #hsolve = moose.HSolve( '/model/elec/hsolve' )
    #moose.useClock( 1, '/model/elec/hsolve', 'process' )
    #hsolve.dt = elecDt
    #hsolve.target = '/model/elec/compt'
    #moose.reinit()
    # Stimulus: steady current injection into the spine head, plus
    # nonzero initial Ca in each chemical compartment.
    moose.element( '/model/elec/spine_head' ).inject = 5e-12
    moose.element( '/model/chem/psd/Ca' ).concInit = 0.001
    moose.element( '/model/chem/spine/Ca' ).concInit = 0.002
    moose.element( '/model/chem/dend/DEND/Ca' ).concInit = 0.003
    moose.reinit()
    """
    print 'pre'
    eca = moose.vec( '/model/chem/psd/PSD/CaM/Ca' )
    for i in range( 3 ):
        print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
    print 'dend'
    eca = moose.vec( '/model/chem/dend/DEND/Ca' )
    #for i in ( 0, 1, 2, 30, 60, 90, 120, 144 ):
    for i in range( 13 ):
        print i, eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
    print 'PSD'
    eca = moose.vec( '/model/chem/psd/PSD/CaM/Ca' )
    for i in range( 3 ):
        print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
    print 'spine'
    eca = moose.vec( '/model/chem/spine/SPINE/CaM/Ca' )
    for i in range( 3 ):
        print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
    """
    moose.start( 0.5 )

    # Plot chemical concentrations (top) and membrane potentials (bottom).
    plt.ion()
    fig = plt.figure( figsize=(8,8) )
    chem = fig.add_subplot( 211 )
    chem.set_ylim( 0, 0.004 )
    plt.ylabel( 'Conc (mM)' )
    plt.xlabel( 'time (seconds)' )
    for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ):
        pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
        line1, = chem.plot( pos, x.vector, label=x.name )
    plt.legend()
    elec = fig.add_subplot( 212 )
    plt.ylabel( 'Vm (V)' )
    plt.xlabel( 'time (seconds)' )
    for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):
        pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
        line1, = elec.plot( pos, x.vector, label=x.name )
    plt.legend()
    fig.canvas.draw()
    raw_input()
    '''
    for x in moose.wildcardFind( '/graphs/##[ISA=Table]' ):
        t = numpy.arange( 0, x.vector.size, 1 )
        pylab.plot( t, x.vector, label=x.name )
    pylab.legend()
    pylab.show()
    '''
    # NOTE(review): pylab is never imported in this file (only numpy and
    # matplotlib.pyplot are), so this line will raise NameError when
    # reached after raw_input().
    pylab.show()
    print 'All done'
def main():
    """Entry point: run the multiscale neuro-mesh demo."""
    testNeuroMeshMultiscale()

if __name__ == '__main__':
    main()
#
# minimal.py ends here.
| gpl-2.0 |
pavelchristof/gomoku-ai | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 77 | 46403 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
            tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
  """Gather entries along `axis` with a 1-D indexer, keeping axis order."""
  with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
    # Rotate the target axis to the front so tf.gather indexes dim 0.
    other_axes = list(labeled_tensor.axes.remove(axis.name).values())
    front_axes = core.Axes([axis] + other_axes)
    rotated = core.transpose(labeled_tensor, front_axes.keys())
    gathered = core.LabeledTensor(
        array_ops.gather(rotated.tensor, indexer), front_axes)
    # Restore the caller's original axis ordering.
    return core.transpose(gathered, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types,
                       tc.Union(slice, collections.Hashable, list)),
            tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
  """Slice out a subset of the tensor.

  Args:
    labeled_tensor: The input tensor.
    selection: A dictionary mapping an axis name to a scalar, slice or list of
      values to select. Currently supports two types of selections:
        (a) Any number of scalar and/or slice selections.
        (b) Exactly one list selection, without any scalars or slices.
    name: Optional op name.

  Returns:
    The selection as a `LabeledTensor`.

  Raises:
    ValueError: If the tensor doesn't have an axis in the selection or if
      that axis lacks labels.
    KeyError: If any labels in a selection are not found in the original axis.
    NotImplementedError: If you attempt to combine a list selection with
      scalar selection or another list selection.
  """
  with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    # Partition the selection into positional slices (handled by
    # slice_function) and a single integer-indexer (handled by gather).
    slices = {}
    indexers = {}
    for axis_name, value in selection.items():
      if axis_name not in labeled_tensor.axes:
        raise ValueError(
            'The tensor does not have an axis named %s. Its axes are: %r' %
            (axis_name, labeled_tensor.axes.keys()))
      axis = labeled_tensor.axes[axis_name]
      if axis.labels is None:
        raise ValueError(
            'The axis named %s does not have labels. The axis is: %r' %
            (axis_name, axis))
      if isinstance(value, slice):
        # TODO(shoyer): consider deprecating using slices in favor of lists
        if value.start is None:
          start = None
        else:
          start = axis.index(value.start)
        if value.stop is None:
          stop = None
        else:
          # For now, follow the pandas convention of making labeled slices
          # inclusive of both bounds.
          stop = axis.index(value.stop) + 1
        if value.step is not None:
          raise NotImplementedError('slicing with a step is not yet supported')
        slices[axis_name] = slice(start, stop)
      # Needs to be after checking for slices, since slice objects claim to be
      # instances of collections.Hashable but hash() on them fails.
      elif isinstance(value, collections.Hashable):
        slices[axis_name] = axis.index(value)
      elif isinstance(value, list):
        if indexers:
          raise NotImplementedError(
              'select does not yet support more than one list selection at '
              'the same time')
        indexer = [axis.index(v) for v in value]
        indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
      else:
        # If type checking is working properly, this shouldn't be possible.
        raise TypeError('cannot handle arbitrary types')

    if indexers and slices:
      raise NotImplementedError(
          'select does not yet support combined scalar and list selection')

    # For now, handle array selection separately, because tf.gather_nd does
    # not support gradients yet. Later, using gather_nd will let us combine
    # these paths.
    if indexers:
      (axis_name, indexer), = indexers.items()
      axis = core.Axis(axis_name, selection[axis_name])
      return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
    else:
      return core.slice_function(labeled_tensor, slices, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), string_types,
    tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
  """Concatenate tensors along a dimension.

  See tf.concat.

  Args:
    labeled_tensors: A list of input LabeledTensors.
    axis_name: The name of the axis along which to concatenate.
    name: Optional op name.

  Returns:
    The concatenated tensor.
    The coordinate labels for the concatenation dimension are also concatenated,
    if they are available for every tensor.

  Raises:
    ValueError: If fewer than one tensor inputs is provided, if the tensors
      have incompatible axes, or if `axis_name` isn't the name of an axis.
  """
  with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]

    if len(labeled_tensors) < 1:
      raise ValueError('concat expects at least 1 tensor, but received %s' %
                       labeled_tensors)

    # All tensors must have these axes.
    axes_0 = labeled_tensors[0].axes
    axis_names = list(axes_0.keys())

    if axis_name not in axis_names:
      raise ValueError('%s not in %s' % (axis_name, axis_names))

    shared_axes = axes_0.remove(axis_name)

    # Validate each remaining tensor against the first and collect its
    # tensor and concat-axis labels.
    tensors = [labeled_tensors[0].tensor]
    concat_axis_list = [axes_0[axis_name]]
    for labeled_tensor in labeled_tensors[1:]:
      current_shared_axes = labeled_tensor.axes.remove(axis_name)
      if current_shared_axes != shared_axes:
        # TODO(shoyer): add more specific checks about what went wrong,
        # including raising AxisOrderError when appropriate
        raise ValueError('Mismatched shared axes: the first tensor '
                         'had axes %r but this tensor has axes %r.' %
                         (shared_axes, current_shared_axes))

      # Accumulate the axis labels, if they're available.
      concat_axis_list.append(labeled_tensor.axes[axis_name])
      tensors.append(labeled_tensor.tensor)

    concat_axis = core.concat_axes(concat_axis_list)
    concat_dimension = axis_names.index(axis_name)
    concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
    # Rebuild the axes list with the merged axis in the original position.
    values = list(axes_0.values())
    concat_axes = (values[:concat_dimension] + [concat_axis] +
                   values[concat_dimension + 1:])

    return core.LabeledTensor(concat_tensor, concat_axes)
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike),
    tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
  """Pack tensors along a new axis.

  See tf.pack.

  Args:
    labeled_tensors: The input tensors, which must have identical axes.
    new_axis: The name of the new axis, or a tuple containing the name
      and coordinate labels.
    axis_position: Optional integer position at which to insert the new axis.
    name: Optional op name.

  Returns:
    The packed tensors as a single LabeledTensor, with `new_axis` in the given
    `axis_position`.

  Raises:
    ValueError: If fewer than one input tensors is provided, or if the tensors
      don't have identical axes.
  """
  with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]

    if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensors, but received %s' %
                       labeled_tensors)

    # Stacking only makes sense when every input has exactly the same axes.
    axes_0 = labeled_tensors[0].axes
    for t in labeled_tensors:
      if t.axes != axes_0:
        raise ValueError('Non-identical axes. Expected %s but got %s' %
                         (axes_0, t.axes))

    pack_op = array_ops.stack(
        [t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
    # The result's axes are the shared axes with the new axis spliced in.
    axes = list(axes_0.values())
    axes.insert(axis_position, new_axis)
    return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
  """Split a LabeledTensor into a list of tensors along one axis.

  See tf.unpack.

  Args:
    labeled_tensor: The input tensor.
    axis_name: Optional name of the axis to unpack along; by default the
      first axis is used.
    name: Optional op name.

  Returns:
    The list of unpacked LabeledTensors, one per entry of the unpacked axis.

  Raises:
    ValueError: If `axis_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    names = list(labeled_tensor.axes.keys())
    target = names[0] if axis_name is None else axis_name
    if target not in names:
      raise ValueError('%s not in %s' % (target, names))
    dim = names.index(target)

    pieces = array_ops.unstack(labeled_tensor.tensor, axis=dim, name=scope)
    # Every piece keeps all axes except the one that was unpacked.
    kept_axes = [ax for pos, ax in enumerate(labeled_tensor.axes.values())
                 if pos != dim]
    return [core.LabeledTensor(piece, kept_axes) for piece in pieces]
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Collection(string_types),
            tc.Collection(tc.Union(string_types, core.AxisLike)),
            tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
  """Reshape specific axes of a LabeledTensor.

  Non-indicated axes remain in their original locations.

  Args:
    labeled_tensor: The input tensor.
    existing_axes: List of axis names found on the input tensor. These must
      appear sequentially in the list of axis names on the input. In other
      words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
    new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
      providing new axes with which to replace `existing_axes` in the reshaped
      result. At most one element of `new_axes` may be a string, indicating an
      axis with unknown size.
    name: Optional op name.

  Returns:
    The reshaped LabeledTensor.

  Raises:
    ValueError: If `existing_axes` are not all axes on the input, or if more
     than one of `new_axes` has unknown size.
    AxisOrderError: If `existing_axes` are not a slice of axis names on the
      input.
  """
  with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    original_axis_names = list(labeled_tensor.axes.keys())
    existing_axes = list(existing_axes)
    if not set(existing_axes) <= set(original_axis_names):
      raise ValueError('existing_axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_axes, original_axis_names))

    # [start, stop) is the candidate slice of the input's dimensions that will
    # be replaced by `new_axes`.
    start = original_axis_names.index(existing_axes[0])
    stop = original_axis_names.index(existing_axes[-1]) + 1

    if existing_axes != original_axis_names[start:stop]:
      # We could support existing_axes that aren't a slice by using transpose,
      # but that could lead to unpredictable performance consequences because
      # transposes are not free in TensorFlow. If we did transpose
      # automatically, the user might never realize that their data is being
      # produced with the wrong order. (The later will occur with some frequency
      # because of how broadcasting automatically choose axis order.)
      # So for now we've taken the strict approach.
      raise core.AxisOrderError(
          'existing_axes %r are not a slice of axis names %r on the input '
          'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
          'axes on the input explicitly.' %
          (existing_axes, original_axis_names))

    if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
      raise ValueError(
          'at most one axis in new_axes can have unknown size. All other '
          'axes must have an indicated integer size or labels: %r' % new_axes)

    original_values = list(labeled_tensor.axes.values())
    # tf.reshape uses -1 for a dimension of unknown size, so map unsized axes
    # to -1 when building the target shape.
    axis_size = lambda axis: -1 if axis.size is None else axis.size
    shape = [axis_size(axis) for axis in original_values[:start]]
    for axis_ref in new_axes:
      if isinstance(axis_ref, string_types):
        # A bare string names an axis of unknown size; let tf.reshape infer it.
        shape.append(-1)
      else:
        axis = core.as_axis(axis_ref)
        shape.append(axis_size(axis))
    shape.extend(axis_size(axis) for axis in original_values[stop:])

    reshaped_tensor = array_ops.reshape(
        labeled_tensor.tensor, shape, name=scope)
    # The output axes are the untouched prefix, the new axes, and the
    # untouched suffix, in that order.
    axes = original_values[:start] + list(new_axes) + original_values[stop:]
    return core.LabeledTensor(reshaped_tensor, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
            tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
  """Rename an axis of LabeledTensor.

  Args:
    labeled_tensor: The input tensor.
    existing_name: Name for an existing axis on the input.
    new_name: Desired replacement name.
    name: Optional op name.

  Returns:
    LabeledTensor with renamed axis.

  Raises:
    ValueError: If `existing_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
    # Unlike the other ops in this module, this function previously skipped
    # this conversion, so a plain tf.Tensor input failed with AttributeError
    # (no `.axes`) instead of being accepted as a LabeledTensorLike.
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_name, labeled_tensor.axes.keys()))
    # Keep the old axis's value (labels or size) under the new name.
    new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
    # Delegate to reshape, which swaps in the renamed axis without moving data.
    return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
            tc.Collection(core.LabeledTensorLike), bool,
            tc.Optional(string_types))
def _batch_helper(default_name,
                  batch_fn,
                  batch_size,
                  enqueue_many,
                  labeled_tensors,
                  allow_smaller_final_batch,
                  name=None):
  """Shared implementation for `batch` and `shuffle_batch`.

  Runs `batch_fn` on the unlabeled tensors and re-attaches axis metadata,
  replacing or prepending a 'batch' axis on each output.

  Args:
    default_name: Default name for the op scope.
    batch_fn: Callable taking (list of tf.Tensors, scope name) and returning
      the batched tf.Tensor or list of tf.Tensors.
    batch_size: Size of the output 'batch' axis.
    enqueue_many: If True, inputs must already have a leading 'batch' axis,
      which is replaced; if False, a new 'batch' axis is prepended.
    labeled_tensors: The input LabeledTensorLikes.
    allow_smaller_final_batch: If True, the last batch may be smaller, so the
      output batch size is recorded as unknown (None).
    name: Optional op name.

  Returns:
    List of batched LabeledTensors, one per input.

  Raises:
    ValueError: If enqueue_many is True and an input's first axis is not
      named 'batch'.
  """
  with ops.name_scope(name, default_name, labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]

    batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
    # TODO(shoyer): Remove this when they sanitize the TF API.
    if not isinstance(batch_ops, list):
      # A single input yields a bare Tensor; normalize to a list.
      assert isinstance(batch_ops, ops.Tensor)
      batch_ops = [batch_ops]

    if allow_smaller_final_batch:
      # The final batch may be short, so the batch dimension size is unknown.
      batch_size = None

    @tc.returns(core.Axes)
    @tc.accepts(core.Axes)
    def output_axes(axes):
      # Build the output axes for one input: a 'batch' axis of size
      # `batch_size` followed by the input's remaining axes.
      if enqueue_many:
        if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
          raise ValueError(
              'When enqueue_many is True, input tensors must have an axis '
              'called "batch" as their first dimension, '
              'but axes were %s' % axes)
        culled_axes = axes.remove('batch')
        return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
      else:
        return core.Axes([('batch', batch_size)] + list(axes.values()))

    output_labeled_tensors = []
    for i, tensor in enumerate(batch_ops):
      axes = output_axes(labeled_tensors[i].axes)
      output_labeled_tensors.append(core.LabeledTensor(tensor, axes))

    return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
    tc.Optional(string_types))
def batch(labeled_tensors,
          batch_size,
          num_threads=1,
          capacity=32,
          enqueue_many=False,
          allow_smaller_final_batch=False,
          name=None):
  """Rebatch a tensor.

  See tf.batch.

  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.batch.
    capacity: See tf.batch.
    enqueue_many: If true, the input tensors must contain a 'batch' axis as
      their first axis; if false, they must not. See tf.batch.
    allow_smaller_final_batch: See tf.batch.
    name: Optional op name.

  Returns:
    The rebatched tensors. When enqueue_many is false, each output gains a
    new 'batch' axis as its first axis.

  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """

  def run_batch(tensors, scope):
    # Forward everything to tf's batch op; _batch_helper does the axis
    # bookkeeping around it.
    return input.batch(tensors,
                       name=scope,
                       batch_size=batch_size,
                       num_threads=num_threads,
                       capacity=capacity,
                       enqueue_many=enqueue_many,
                       allow_smaller_final_batch=allow_smaller_final_batch)

  return _batch_helper('lt_batch', run_batch, batch_size, enqueue_many,
                       labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
    tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
                  batch_size,
                  num_threads=1,
                  capacity=32,
                  enqueue_many=False,
                  min_after_dequeue=0,
                  seed=None,
                  allow_smaller_final_batch=False,
                  name=None):
  """Rebatch a tensor, with shuffling.

  See tf.batch.

  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.batch.
    capacity: See tf.batch.
    enqueue_many: If true, the input tensors must contain a 'batch' axis as
      their first axis; if false, they must not. See tf.batch.
    min_after_dequeue: Minimum number of elements in the queue after a dequeue,
      used to ensure mixing.
    seed: Optional random seed.
    allow_smaller_final_batch: See tf.batch.
    name: Optional op name.

  Returns:
    The rebatched tensors. When enqueue_many is false, each output gains a
    new 'batch' axis as its first axis.

  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """

  def run_shuffle_batch(tensors, scope):
    # Forward everything to tf's shuffle_batch op; _batch_helper does the
    # axis bookkeeping around it.
    return input.shuffle_batch(tensors,
                               name=scope,
                               batch_size=batch_size,
                               num_threads=num_threads,
                               capacity=capacity,
                               enqueue_many=enqueue_many,
                               min_after_dequeue=min_after_dequeue,
                               seed=seed,
                               allow_smaller_final_batch=(
                                   allow_smaller_final_batch))

  return _batch_helper('lt_shuffle_batch', run_shuffle_batch, batch_size,
                       enqueue_many, labeled_tensors,
                       allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types, int),
            tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
  """Randomly crops a tensor to a given size.

  See tf.random_crop.

  Args:
    labeled_tensor: The input tensor.
    shape_map: A dictionary mapping axis names to the size of the random crop
      for that dimension.
    seed: An optional random seed.
    name: An optional op name.

  Returns:
    A tensor of the same rank as `labeled_tensor`, cropped randomly in the
    selected dimensions.

  Raises:
    ValueError: If the shape map contains an axis name not in the input tensor.
  """
  with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    for requested_name in shape_map:
      if requested_name not in labeled_tensor.axes:
        raise ValueError('Selection axis %s not in axes %s' %
                         (requested_name, labeled_tensor.axes))

    target_shape = []
    result_axes = []
    for axis in labeled_tensor.axes.values():
      if axis.name not in shape_map:
        # Untouched axis: keep its full size and any tick labels.
        target_shape.append(len(axis))
        result_axes.append(axis)
      else:
        crop_size = shape_map[axis.name]
        target_shape.append(crop_size)
        # Tick labels cannot survive a random crop, so keep only the size.
        result_axes.append((axis.name, crop_size))

    cropped = random_ops.random_crop(
        labeled_tensor.tensor, target_shape, seed=seed, name=scope)

    return core.LabeledTensor(cropped, result_axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
  """Map on the list of tensors unpacked from labeled_tensor.

  See tf.map_fn.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type LabeledTensor -> LabeledTensor.
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    A tensor that packs the results of applying fn to the list of tensors
    unpacked from labeled_tensor.
  """
  with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    unpack_lts = unpack(labeled_tensor)

    # TODO(ericmc): Fix this upstream.
    if labeled_tensor.dtype == dtypes.string:
      # We must construct the full graph here, because functional_ops.map_fn
      # doesn't work for string-valued tensors.
      # Constructing the full graph may be slow.
      map_lts = [fn(t) for t in unpack_lts]
      return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
    else:
      # Figure out what the axis labels should be, but use tf.map_fn to
      # construct the graph because it's efficient.
      # It may be slow to construct the full graph, so we infer the labels from
      # the first element.
      # TODO(ericmc): This builds a subgraph which then gets thrown away.
      # Find a more elegant solution.
      first_map_lt = fn(unpack_lts[0])
      # The output keeps the input's first (mapped-over) axis, followed by
      # the axes `fn` produced for the first element.
      final_axes = list(labeled_tensor.axes.values())[:1] + list(
          first_map_lt.axes.values())

      @tc.returns(ops.Tensor)
      @tc.accepts(ops.Tensor)
      def tf_fn(tensor):
        # Re-attach axis metadata (minus the mapped-over first axis) so `fn`
        # receives a LabeledTensor, then strip it again for tf.map_fn.
        original_axes = list(labeled_tensor.axes.values())[1:]
        tensor_lt = core.LabeledTensor(tensor, original_axes)
        return fn(tensor_lt).tensor

      map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
      map_lt = core.LabeledTensor(map_op, final_axes)

      return core.identity(map_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
  """Left fold on the list of tensors unpacked from labeled_tensor.

  See tf.foldl.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
      Its arguments are (accumulated_value, next_value).
    labeled_tensor: The input tensor.
    initial_value: The initial value of the accumulator.
    name: Optional op name.

  Returns:
    The accumulated value.
  """
  with ops.name_scope(name, 'lt_foldl',
                      [labeled_tensor, initial_value]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    initial_value = core.convert_to_labeled_tensor(initial_value)

    # Each folded element has the input's axes minus the leading (folded) one.
    element_axes = list(labeled_tensor.axes.values())[1:]

    @tc.returns(ops.Tensor)
    @tc.accepts(ops.Tensor, ops.Tensor)
    def unlabeled_fn(accumulator, next_element):
      # Re-attach axis metadata so the user-provided fn sees LabeledTensors,
      # then strip it again for tf.foldl.
      accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
      next_element_lt = core.LabeledTensor(next_element, element_axes)
      return fn(accumulator_lt, next_element_lt).tensor

    result = functional_ops.foldl(
        unlabeled_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
    result_lt = core.LabeledTensor(result, initial_value.axes)
    return core.identity(result_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
  """Remove size-1 dimensions.

  See tf.squeeze.

  Args:
    labeled_tensor: The input tensor.
    axis_names: The names of the dimensions to remove, or None to remove
      all size-1 dimensions.
    name: Optional op name.

  Returns:
    A tensor with the specified dimensions removed.

  Raises:
    ValueError: If the named axes are not in the tensor, or if they are
      not size-1.
  """
  with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    all_axes = labeled_tensor.axes
    if axis_names is None:
      # Default: squeeze every size-1 axis.
      axis_names = [ax.name for ax in all_axes.values() if len(ax) == 1]

    for candidate in axis_names:
      if candidate not in all_axes:
        raise ValueError('axis %s is not in tensor axes %s' %
                         (candidate, all_axes))
      if len(all_axes[candidate]) != 1:
        raise ValueError(
            'cannot squeeze axis with size greater than 1: (%s, %s)' %
            (candidate, all_axes[candidate]))

    dims_to_drop = []
    kept_axes = []
    for index, axis in enumerate(all_axes.values()):
      if axis.name in axis_names:
        dims_to_drop.append(index)
      else:
        kept_axes.append(axis)

    if dims_to_drop:
      result_op = array_ops.squeeze(
          labeled_tensor.tensor, dims_to_drop, name=scope)
    else:
      # Nothing to squeeze; still emit an op carrying the scope name.
      result_op = array_ops.identity(labeled_tensor.tensor, name=scope)

    return core.LabeledTensor(result_op, kept_axes)
# pylint: disable=invalid-name
# A reduction axis is either a bare axis name (that axis is squeezed out of
# the result) or a (name, label) pair (that axis is kept, with the new label).
ReduceAxis = tc.Union(string_types,
                      tc.Tuple(string_types, collections.Hashable))
# `axes` arguments accept a single ReduceAxis, a collection of them, or None
# (meaning reduce over all axes).
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def matmul(a, b, name=None):
  """Matrix multiply two tensors with rank 1 or 2.

  If both tensors have rank 2, a matrix-matrix product is performed.
  If one tensor has rank 1 and the other has rank 2, then a matrix-vector
  product is performed.
  If both tensors have rank 1, then a vector dot-product is performed.
  (This behavior matches that of `numpy.dot`.)

  Both tensors must share exactly one dimension in common, which is the
  dimension the operation is summed along. The inputs will be automatically
  transposed if necessary as part of the matmul op.

  We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number shared dimensions (via an `axis`
  argument), but neither of these features has been implemented yet.

  Args:
    a: First LabeledTensor.
    b: Second LabeledTensor.
    name: Optional op name.

  Returns:
    LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in or order of appearance on the
    inputs.

  Raises:
    NotImplementedError: If inputs have rank >2 or share multiple axes.
    ValueError: If the inputs have rank 0 or do not share any axes.
  """
  with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
    a = core.convert_to_labeled_tensor(a)
    b = core.convert_to_labeled_tensor(b)

    if len(a.axes) > 2 or len(b.axes) > 2:
      # We could pass batched inputs to tf.matmul to make this work, but we
      # would also need to use tf.tile and/or tf.transpose. These are more
      # expensive than doing reshapes, so it's not clear if it's a good idea to
      # do this automatically.
      raise NotImplementedError(
          'matmul currently requires inputs with rank 2 or less, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))

    if not a.axes or not b.axes:
      raise ValueError(
          'matmul currently requires inputs with at least rank 1, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))

    # The contraction axis is the single axis name the inputs have in common.
    shared_axes = set(a.axes) & set(b.axes)
    if len(shared_axes) > 1:
      raise NotImplementedError(
          'matmul does not yet support summing over multiple shared axes: %r. '
          'Use transpose and reshape to create a single shared axis to sum '
          'over.' % shared_axes)
    if not shared_axes:
      raise ValueError('there must have exactly one axis in common between '
                       'input to matmul: %r, %r' %
                       (a.axes.keys(), b.axes.keys()))
    shared_axis, = shared_axes

    if a.axes[shared_axis] != b.axes[shared_axis]:
      raise ValueError('axis %r does not match on input arguments: %r vs %r' %
                       (shared_axis, a.axes[shared_axis].value,
                        b.axes[shared_axis].value))

    # The result keeps every non-shared axis from both inputs, a's axes first.
    result_axes = []
    for axes in [a.axes, b.axes]:
      for axis in axes.values():
        if axis.name != shared_axis:
          result_axes.append(axis)

    # If an axis-order scope is active and disagrees with the natural result
    # order, swap the operands (and the result axes) instead of transposing
    # the output tensor.
    axis_scope_order = core.get_axis_order()
    if axis_scope_order is not None:
      result_axis_names = [axis.name for axis in result_axes]
      new_axis_names = [
          name for name in axis_scope_order if name in result_axis_names
      ]
      if new_axis_names != result_axis_names:
        # switch a and b
        b, a = a, b
        # result_axes is a list of length 1 or 2
        result_axes = result_axes[::-1]

    squeeze_dims = []

    if len(a.axes) == 1:
      # Promote the vector to a 1-row matrix; the dummy row dimension is
      # squeezed back out of the result below.
      a_tensor = array_ops.reshape(a.tensor, (1, -1))
      squeeze_dims.append(0)
      transpose_a = False
    else:
      a_tensor = a.tensor
      # tf.matmul contracts over a's last dimension, so transpose when the
      # shared axis is a's first dimension.
      transpose_a = list(a.axes.keys()).index(shared_axis) == 0

    if len(b.axes) == 1:
      # Promote the vector to a 1-column matrix, squeezed out below.
      b_tensor = array_ops.reshape(b.tensor, (-1, 1))
      squeeze_dims.append(1)
      transpose_b = False
    else:
      b_tensor = b.tensor
      # tf.matmul contracts over b's first dimension, so transpose when the
      # shared axis is b's second dimension.
      transpose_b = list(b.axes.keys()).index(shared_axis) == 1

    result_op = math_ops.matmul(
        a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)

    if squeeze_dims:
      result_op = array_ops.squeeze(result_op, squeeze_dims)
    result_op = array_ops.identity(result_op, name=scope)

    return core.LabeledTensor(result_op, result_axes)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
  """Define a reduction op for labeled tensors.

  Args:
    op_name: string name of the TensorFlow op.
    reduce_fn: function to call to evaluate the op on a tf.Tensor.

  Returns:
    Function defining the given reduction op that acts on a LabeledTensor.
  """

  default_name = 'lt_%s' % op_name

  @tc.returns(core.LabeledTensor)
  @tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
  def op(labeled_tensor, axes=None, name=None):
    """Computes the given reduction across the given axes of a LabeledTensor.

    See `tf.{op_name}` for full details.

    Args:
      labeled_tensor: The input tensor.
      axes: A set of axes or None.
        If None, all axes will be reduced.
        Axes must all be strings, in which case those dimensions will be
        removed, or pairs of (name, None) or (name, label), in which case those
        dimensions will be kept.
      name: Optional op name.

    Returns:
      The reduced LabeledTensor.

    Raises:
      ValueError: if any of the axes to reduce over are not found on
        `labeled_tensor`.
    """
    with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
      labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

      if axes is None:
        axes = labeled_tensor.axes.keys()

      # A single axis (string or (name, label) tuple) is wrapped in a list.
      if isinstance(axes, (string_types, tuple)):
        axes = [axes]

      reduction_axes = {}
      axes_to_squeeze = []
      for a in axes:
        if isinstance(a, string_types):
          # We squeeze out this axis.
          reduction_axes[a] = a
          axes_to_squeeze.append(a)
        else:
          # We keep this axis, with the user-provided labels.
          (axis_name, label) = a
          if label is not None:
            # The input was a single label, so make it a list so it can be
            # turned into an Axis.
            label = [label]
          reduction_axes[axis_name] = (axis_name, label)

      for axis_name in reduction_axes:
        if axis_name not in labeled_tensor.axes:
          raise ValueError('Axis %s not in axes %s' %
                           (axis_name, labeled_tensor.axes))

      intermediate_axes = []
      reduction_dimensions = []
      for i, axis in enumerate(labeled_tensor.axes.values()):
        if axis.name in reduction_axes:
          intermediate_axes.append(reduction_axes[axis.name])
          reduction_dimensions.append(i)
        else:
          intermediate_axes.append(axis)

      # Reduce with keep_dims=True so reduced axes survive with size 1, then
      # squeeze out only the axes the caller asked to drop entirely.
      reduce_op = reduce_fn(
          labeled_tensor.tensor, reduction_dimensions, keep_dims=True)
      reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)

      return squeeze(reduce_lt, axes_to_squeeze, name=scope)

  # Substitute the concrete op name into the generated docstring.
  op.__doc__ = op.__doc__.format(op_name=op_name)
  op.__name__ = op_name

  return op
# Reduction ops over LabeledTensor axes, each generated from the
# corresponding tf math_ops reduction via `define_reduce_op`.
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
                                    math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Union(int, ops.Tensor)),
            tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
  """Constructs a tensor by tiling a given tensor.

  Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
  tensors would no longer be unique.)

  See tf.tile.

  Args:
    labeled_tensor: The input tensor.
    multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes with a multiple
      different than 1 need be included.
    name: Optional op name.

  Returns:
    A tensor with the indicated axes tiled.

  Raises:
    ValueError: If the tiled axes are not axes in the input tensor, or if any
      axes in multiples have tick labels.
  """
  with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    axes = labeled_tensor.axes
    if not set(multiples.keys()) <= set(axes.keys()):
      raise ValueError('tile axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (multiples.keys(), axes))

    ticked = [ax_name for ax_name in multiples
              if axes[ax_name].labels is not None]
    if ticked:
      raise ValueError('cannot tile axes with tick labels: %r' % ticked)

    # Axes not mentioned in `multiples` are tiled once (i.e. unchanged).
    per_axis_multiples = [multiples.get(ax_name, 1) for ax_name in axes]
    tiled = array_ops.tile(labeled_tensor.tensor, per_axis_multiples,
                           name=scope)

    # Unlabeled axes are rebuilt from the tiled tensor's shape (so tiled axes
    # pick up their new size); labeled axes are untouched and kept whole.
    result_axes = []
    for axis in axes.values():
      result_axes.append(axis.name if axis.labels is None else axis)
    return core.LabeledTensor(tiled, result_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
            string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
  """Pads a tensor.

  See tf.pad.

  Args:
    labeled_tensor: The input tensor.
    paddings: A mapping where the keys are axis names and the values are
      tuples where the first element is the padding to insert at the beginning
      of the axis and the second is the padding to insert at the end of the
      axis.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
    name: Optional op name.

  Returns:
    A tensor with the indicated axes padded, optionally with those axes extended
    with the provided labels.

  Raises:
    ValueError: If the padded axes are not axes in the input tensor.
  """
  with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('pad axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (paddings.keys(), labeled_tensor.axes))

    new_axes = []
    padding_pairs = []
    # The loop variable was previously called `name`, shadowing the op-name
    # parameter; renamed to `axis_name` for clarity (behavior unchanged, since
    # `name` was already consumed by ops.name_scope above).
    for axis_name, axis in labeled_tensor.axes.items():
      if axis_name in paddings:
        padding_before, padding_after = paddings[axis_name]
        # Build Axis objects for the leading/trailing padding so their sizes
        # (or labels) can be concatenated onto the original axis.
        axis_before = core.Axis(axis_name, padding_before)
        axis_after = core.Axis(axis_name, padding_after)
        new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
        padding_pairs.append((len(axis_before), len(axis_after)))
      else:
        new_axes.append(axis)
        padding_pairs.append((0, 0))

    pad_op = array_ops.pad(labeled_tensor.tensor,
                           padding_pairs,
                           mode,
                           name=scope)

    return core.LabeledTensor(pad_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Union(np.ndarray, list, tuple, core.Scalar),
    tc.Optional(dtypes.DType),
    tc.Optional(
        tc.Union(core.Axes, tc.Collection(
            tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
  """Creates a constant tensor.

  If `axes` includes any strings, shape is inferred from `value`. Otherwise,
  the sizes of the given `axes` are used to set `shape` for `tf.constant`.

  See tf.constant for more details.

  Args:
    value: The input tensor.
    dtype: The type of the returned tensor.
    axes: Optional Axes, list of strings or list of objects coercible to Axis
      objects. By default, axes are assumed to be an empty list (i.e., `value`
      is treated as a scalar).
    name: Optional op name.

  Returns:
    The constant LabeledTensor.
  """
  with ops.name_scope(name, 'lt_constant', [value]) as scope:

    if axes is None:
      axes = []

    if isinstance(axes, core.Axes):
      axes = axes.values()

    if any(isinstance(ax, string_types) for ax in axes):
      # need to infer shape
      shape = None
    else:
      # axes already indicate shape
      axes = [core.as_axis(a) for a in axes]
      shape = [a.size for a in axes]

    op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
  """Creates an identical tensor with all elements set to zero.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to zero.
  """
  with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    # The output keeps the input's axes; only the element values change.
    zeros = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype,
                                 name=scope)
    return core.LabeledTensor(zeros, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
  """Creates an identical tensor with all elements set to one.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to one.
  """
  with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    # The output keeps the input's axes; only the element values change.
    ones = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(ones, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
  """Casts a labeled tensor to a new type.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    A labeled tensor with the new dtype.
  """
  with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    # Casting changes only the element type; the axes carry over unchanged.
    converted = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(converted, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
  """Asserts a tensor doesn't contain NaNs or Infs.

  See tf.verify_tensor_all_finite.

  Args:
    labeled_tensor: The input tensor.
    message: Message to log on failure.
    name: Optional op name.

  Returns:
    The input tensor.
  """
  with ops.name_scope(name, 'lt_verify_tensor_all_finite',
                      [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    # The check passes the values through unchanged, so the axes carry over.
    checked = numerics.verify_tensor_all_finite(
        labeled_tensor.tensor, msg=message, name=scope)
    return core.LabeledTensor(checked, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
  """Apply a boolean mask to a labeled tensor.

  Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
  The mask is applied to the first axis of `labeled_tensor`. Labels on the first
  axis are removed, because True indices in `mask` may not be known dynamically.

  Args:
    labeled_tensor: The input tensor.
    mask: A rank-1 boolean LabeledTensor whose single axis must equal the
      first axis of `labeled_tensor`.
    name: Optional op name.

  Returns:
    The masked labeled tensor.

  Raises:
    NotImplementedError: if the mask has more than one axis.
    ValueError: if the first axis of the labeled tensor does not equal the
      axis of the mask.
  """
  with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    mask = core.convert_to_labeled_tensor(mask)

    if len(mask.axes) > 1:
      raise NotImplementedError(
          "LabeledTensor's boolean_mask currently only supports 1D masks")
    mask_axis = list(mask.axes.values())[0]
    lt_axis = list(labeled_tensor.axes.values())[0]
    if mask_axis != lt_axis:
      raise ValueError('the first axis of the labeled tensor and the mask '
                       'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
    op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
    # TODO(shoyer): attempt to infer labels for the masked values, by calling
    # tf.contrib.util.constant_value on the mask?
    # Keep only the name of the masked first axis (its labels/size are no
    # longer valid after masking); remaining axes are unchanged.
    axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
  """Return elements from x or y depending on condition.

  See `tf.where` for more details. This function currently only implements the
  three argument version of where.

  Args:
    condition: LabeledTensor of type `bool`.
    x: LabeledTensor for values where condition is true.
    y: LabeledTensor for values where condition is false.
    name: Optional op name.

  Returns:
    The labeled tensor with values according to condition.

  Raises:
    ValueError: if `condition`, `x` and `y` do not all have identical axes.
  """
  with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
    condition = core.convert_to_labeled_tensor(condition)
    x = core.convert_to_labeled_tensor(x)
    y = core.convert_to_labeled_tensor(y)

    if not condition.axes == x.axes == y.axes:
      raise ValueError('all inputs to `where` must have equal axes')

    op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
    # All three inputs share the same axes, so the output does too.
    return core.LabeledTensor(op, x.axes)
| apache-2.0 |
StongeEtienne/dipy | dipy/data/__init__.py | 1 | 12766 | """
Read test or example data
"""
from __future__ import division, print_function, absolute_import
import sys
import json
from nibabel import load
from os.path import join as pjoin, dirname
import gzip
import numpy as np
from dipy.core.gradients import GradientTable, gradient_table
from dipy.core.sphere import Sphere, HemiSphere
from dipy.sims.voxel import SticksAndBall
from dipy.data.fetcher import (fetch_scil_b0,
read_scil_b0,
fetch_stanford_hardi,
read_stanford_hardi,
fetch_taiwan_ntu_dsi,
read_taiwan_ntu_dsi,
fetch_sherbrooke_3shell,
read_sherbrooke_3shell,
fetch_isbi2013_2shell,
read_isbi2013_2shell,
read_stanford_labels,
fetch_syn_data,
read_syn_data,
fetch_stanford_t1,
read_stanford_t1,
fetch_stanford_pve_maps,
read_stanford_pve_maps,
fetch_viz_icons,
read_viz_icons,
fetch_bundles_2_subjects,
read_bundles_2_subjects,
fetch_cenir_multib,
read_cenir_multib,
fetch_mni_template,
read_mni_template)
from ..utils.arrfuncs import as_native_array
from dipy.tracking.streamline import relist_streamlines
# Pickle compatibility shim: the bundled data files were pickled under
# Python 2, so Python 3 must decode byte strings as latin-1 to load them.
if sys.version_info[0] < 3:
    import cPickle

    def loads_compat(bytes):
        # Python 2: plain cPickle round-trip.
        return cPickle.loads(bytes)
else:  # Python 3
    import pickle

    # Need to load pickles saved in Python 2
    def loads_compat(bytes):
        return pickle.loads(bytes, encoding='latin1')
# Directory containing the small data files that ship with dipy.
DATA_DIR = pjoin(dirname(__file__), 'files')

# Map from sphere name (as accepted by `get_sphere`) to the .npz file
# holding its vertices and faces.
SPHERE_FILES = {
    'symmetric362': pjoin(DATA_DIR, 'evenly_distributed_sphere_362.npz'),
    'symmetric642': pjoin(DATA_DIR, 'evenly_distributed_sphere_642.npz'),
    'symmetric724': pjoin(DATA_DIR, 'evenly_distributed_sphere_724.npz'),
    'repulsion724': pjoin(DATA_DIR, 'repulsion724.npz'),
    'repulsion100': pjoin(DATA_DIR, 'repulsion100.npz')
}
class DataError(Exception):
    """Raised when a requested bundled data item cannot be found
    (e.g. an unknown sphere name in `get_sphere`)."""
    pass
def get_sim_voxels(name='fib1'):
    """ provide some simulated voxel data

    Parameters
    ------------
    name : str, which file?
        'fib0', 'fib1' or 'fib2'

    Returns
    ---------
    dix : dictionary, where dix['data'] returns a 2d array
        where every row is a simulated voxel with different orientation

    Raises
    ------
    ValueError
        If `name` is not one of 'fib0', 'fib1', 'fib2'.  (Previously an
        unknown name produced an opaque NameError.)

    Examples
    ----------
    >>> from dipy.data import get_sim_voxels
    >>> sv=get_sim_voxels('fib1')
    >>> sv['data'].shape == (100, 102)
    True
    >>> sv['fibres']
    '1'
    >>> sv['gradients'].shape == (102, 3)
    True
    >>> sv['bvals'].shape == (102,)
    True
    >>> sv['snr']
    '60'
    >>> sv2=get_sim_voxels('fib2')
    >>> sv2['fibres']
    '2'
    >>> sv2['snr']
    '80'

    Notes
    -------
    These sim voxels were provided by M.M. Correia using Rician noise.
    """
    if name not in ('fib0', 'fib1', 'fib2'):
        # Fail fast instead of hitting a NameError on an unset `fname`.
        raise ValueError('No simulated voxel data called "%s"' % name)
    fname = pjoin(DATA_DIR, name + '.pkl.gz')
    # Context manager closes the gzip handle (the original leaked it).
    with gzip.open(fname, 'rb') as f:
        return loads_compat(f.read())
def get_skeleton(name='C1'):
    """ provide skeletons generated from Local Skeleton Clustering (LSC)

    Parameters
    -----------
    name : str, 'C1' or 'C3'

    Returns
    -------
    dix : dictionary

    Raises
    ------
    ValueError
        If `name` is not 'C1' or 'C3'.  (Previously an unknown name
        produced an opaque NameError.)

    Examples
    ---------
    >>> from dipy.data import get_skeleton
    >>> C=get_skeleton('C1')
    >>> len(C.keys())
    117
    >>> for c in C: break
    >>> sorted(C[c].keys())
    ['N', 'hidden', 'indices', 'most']
    """
    if name not in ('C1', 'C3'):
        # Fail fast instead of hitting a NameError on an unset `fname`.
        raise ValueError('No skeleton called "%s"' % name)
    fname = pjoin(DATA_DIR, name + '.pkl.gz')
    # Context manager closes the gzip handle (the original leaked it).
    with gzip.open(fname, 'rb') as f:
        return loads_compat(f.read())
def get_sphere(name='symmetric362'):
    ''' provide triangulated spheres

    Parameters
    ------------
    name : str
        which sphere - one of:
        * 'symmetric362'
        * 'symmetric642'
        * 'symmetric724'
        * 'repulsion724'
        * 'repulsion100'

    Returns
    -------
    sphere : a dipy.core.sphere.Sphere class instance

    Examples
    --------
    >>> import numpy as np
    >>> from dipy.data import get_sphere
    >>> sphere = get_sphere('symmetric362')
    >>> verts, faces = sphere.vertices, sphere.faces
    >>> verts.shape == (362, 3)
    True
    >>> faces.shape == (720, 3)
    True
    >>> verts, faces = get_sphere('not a sphere name') #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    DataError: No sphere called "not a sphere name"
    '''
    try:
        fname = SPHERE_FILES[name]
    except KeyError:
        raise DataError('No sphere called "%s"' % name)
    arrays = np.load(fname)
    # Cast to native byte order to avoid errors in compiled routines for
    # big-endian platforms, when using these spheres.
    vertices = as_native_array(arrays['vertices'])
    faces = as_native_array(arrays['faces'])
    return Sphere(xyz=vertices, faces=faces)
# Module-level hemispheres built once at import time and reused across
# dipy as convenient defaults.
default_sphere = HemiSphere.from_sphere(get_sphere('symmetric724'))
small_sphere = HemiSphere.from_sphere(get_sphere('symmetric362'))
def get_data(name='small_64D'):
    """ provides filenames of some test datasets or other useful parametrisations

    Parameters
    ----------
    name : str
        the filename/s of which dataset to return, one of:
        'small_64D' small region of interest nifti,bvecs,bvals 64 directions
        'small_101D' small region of interest nifti,bvecs,bvals 101 directions
        'aniso_vox' volume with anisotropic voxel size as Nifti
        'fornix' 300 tracks in Trackvis format (from Pittsburgh
                 Brain Competition)
        'gqi_vectors' the scanner wave vectors needed for a GQI acquisitions
                      of 101 directions tested on Siemens 3T Trio
        'small_25' small ROI (10x8x2) DTI data (b value 2000, 25 directions)
        'test_piesno' slice of N=8, K=14 diffusion data
        'reg_c' small 2D image used for validating registration
        'reg_o' small 2D image used for validation registration
        'cb_2' two vectorized cingulum bundles
        Also available: '55dir_grad.bvec', 'dsi515btable', 'dsi4169btable',
        'grad514', 'S0_10', 't1_coronal_slice'.

    Returns
    -------
    fnames : tuple
        filenames for dataset

    Raises
    ------
    ValueError
        If `name` does not match any known dataset.  (Previously unknown
        names silently returned None.)

    Examples
    ----------
    >>> import numpy as np
    >>> from dipy.data import get_data
    >>> fimg,fbvals,fbvecs=get_data('small_101D')
    >>> bvals=np.loadtxt(fbvals)
    >>> bvecs=np.loadtxt(fbvecs).T
    >>> import nibabel as nib
    >>> img=nib.load(fimg)
    >>> data=img.get_data()
    >>> data.shape == (6, 10, 10, 102)
    True
    >>> bvals.shape == (102,)
    True
    >>> bvecs.shape == (102, 3)
    True
    """
    if name == 'small_64D':
        fbvals = pjoin(DATA_DIR, 'small_64D.bvals.npy')
        fbvecs = pjoin(DATA_DIR, 'small_64D.gradients.npy')
        fimg = pjoin(DATA_DIR, 'small_64D.nii')
        return fimg, fbvals, fbvecs
    if name == '55dir_grad.bvec':
        return pjoin(DATA_DIR, '55dir_grad.bvec')
    if name == 'small_101D':
        fbvals = pjoin(DATA_DIR, 'small_101D.bval')
        fbvecs = pjoin(DATA_DIR, 'small_101D.bvec')
        fimg = pjoin(DATA_DIR, 'small_101D.nii.gz')
        return fimg, fbvals, fbvecs
    if name == 'aniso_vox':
        return pjoin(DATA_DIR, 'aniso_vox.nii.gz')
    if name == 'fornix':
        return pjoin(DATA_DIR, 'tracks300.trk')
    if name == 'gqi_vectors':
        return pjoin(DATA_DIR, 'ScannerVectors_GQI101.txt')
    if name == 'dsi515btable':
        return pjoin(DATA_DIR, 'dsi515_b_table.txt')
    if name == 'dsi4169btable':
        return pjoin(DATA_DIR, 'dsi4169_b_table.txt')
    if name == 'grad514':
        return pjoin(DATA_DIR, 'grad_514.txt')
    if name == "small_25":
        fbvals = pjoin(DATA_DIR, 'small_25.bval')
        fbvecs = pjoin(DATA_DIR, 'small_25.bvec')
        fimg = pjoin(DATA_DIR, 'small_25.nii.gz')
        return fimg, fbvals, fbvecs
    if name == "S0_10":
        fimg = pjoin(DATA_DIR, 'S0_10slices.nii.gz')
        return fimg
    if name == "test_piesno":
        fimg = pjoin(DATA_DIR, 'test_piesno.nii.gz')
        return fimg
    if name == "reg_c":
        return pjoin(DATA_DIR, 'C.npy')
    if name == "reg_o":
        return pjoin(DATA_DIR, 'circle.npy')
    if name == 'cb_2':
        return pjoin(DATA_DIR, 'cb_2.npz')
    if name == "t1_coronal_slice":
        return pjoin(DATA_DIR, 't1_coronal_slice.npy')
    # Unknown names previously fell off the end and returned None, which
    # produced confusing errors far from the call site; fail fast instead.
    raise ValueError('No dataset called "%s"' % name)
def _gradient_from_file(filename):
    """Build a zero-argument loader for a gradient table stored as a
    comma-delimited text file in the dipy data directory."""
    def loader():
        table = np.loadtxt(pjoin(DATA_DIR, filename), delimiter=',')
        return GradientTable(table)
    return loader
# Zero-argument loaders for a few gradient tables shipped with dipy.
get_3shell_gtab = _gradient_from_file("gtab_3shell.txt")
get_isbi2013_2shell_gtab = _gradient_from_file("gtab_isbi2013_2shell.txt")
get_gtab_taiwan_dsi = _gradient_from_file("gtab_taiwan_dsi.txt")
def dsi_voxels():
    """Load the small 101-direction test dataset and its gradient table.

    Returns the 4D data array and the matching GradientTable.
    """
    fimg, fbvals, fbvecs = get_data('small_101D')
    img = load(fimg)
    bvals = np.loadtxt(fbvals)
    bvecs = np.loadtxt(fbvecs).T
    return img.get_data(), gradient_table(bvals, bvecs)
def dsi_deconv_voxels():
    """Simulate a tiny 2x2x2 DSI volume of two crossing fibers.

    Every voxel holds the same noise-free two-stick signal (0 and 90
    degrees, equal fractions) on the 515-point DSI b-table.
    """
    gtab = gradient_table(np.loadtxt(get_data('dsi515btable')))
    data = np.zeros((2, 2, 2, 515))
    for voxel in np.ndindex(2, 2, 2):
        signal, _ = SticksAndBall(gtab,
                                  d=0.0015,
                                  S0=100,
                                  angles=[(0, 0), (90, 0)],
                                  fractions=[50, 50],
                                  snr=None)
        data[voxel] = signal
    return data, gtab
def mrtrix_spherical_functions():
    """Spherical functions represented by spherical harmonic coefficients and
    evaluated on a discrete sphere.

    Returns
    -------
    func_coef : array (2, 3, 4, 45)
        Functions represented by the coefficients associated with the
        mxtrix spherical harmonic basis of order 8.
    func_discrete : array (2, 3, 4, 81)
        Functions evaluated on `sphere`.
    sphere : Sphere
        The discrete sphere, points on the surface of a unit sphere, used to
        evaluate the functions.

    Notes
    -----
    These coefficients were obtained by using the dwi2SH command of mrtrix.
    """
    func_discrete = load(pjoin(DATA_DIR, "func_discrete.nii.gz")).get_data()
    func_coef = load(pjoin(DATA_DIR, "func_coef.nii.gz")).get_data()
    gradients = np.loadtxt(pjoin(DATA_DIR, "sphere_grad.txt"))
    # gradients[0] and the first volume of func_discrete,
    # func_discrete[..., 0], are associated with the b=0 signal.
    # gradients[:, 3] are the b-values for each gradient/volume.
    # Hence both the sphere and the returned discrete values drop index 0.
    sphere = Sphere(xyz=gradients[1:, :3])
    return func_coef, func_discrete[..., 1:], sphere
# Lazily-populated cache of colormap descriptions loaded by get_cmap().
dipy_cmaps = None
def get_cmap(name):
    """Return a callable colormap, similar to matplotlib.pyplot.get_cmap.

    Returns None if `name` is not one of the bundled dipy colormaps.
    """
    global dipy_cmaps
    if dipy_cmaps is None:
        # Load the colormap descriptions once, on first use.
        with open(pjoin(DATA_DIR, "dipy_colormaps.json")) as fobj:
            dipy_cmaps = json.load(fobj)

    desc = dipy_cmaps.get(name)
    if desc is None:
        return None

    def simple_cmap(v):
        """Emulates a matplotlib colormap callable."""
        rgba = np.ones((len(v), 4))
        for channel, key in enumerate(('red', 'green', 'blue')):
            x, y0, y1 = zip(*desc[key])
            # Matplotlib allows more complex colormaps, but for users who do
            # not have Matplotlib dipy makes a few simple colormaps available.
            # These colormaps are simple because y0 == y1, and therefore we
            # ignore y1 here.
            rgba[:, channel] = np.interp(v, x, y0)
        return rgba

    return simple_cmap
def two_cingulum_bundles():
    """Load the two vectorized cingulum bundles shipped with dipy."""
    npz = np.load(get_data('cb_2'))
    bundle_a = relist_streamlines(npz['points'], npz['offsets'])
    bundle_b = relist_streamlines(npz['points2'], npz['offsets2'])
    return bundle_a, bundle_b
def matlab_life_results():
    """Load reference RMSE and weight arrays produced by the Matlab LiFE code."""
    return (np.load(pjoin(DATA_DIR, 'life_matlab_rmse.npy')),
            np.load(pjoin(DATA_DIR, 'life_matlab_weights.npy')))
| bsd-3-clause |
McIntyre-Lab/papers | fear_sem_sd_2015/scripts/dspr_gene_ggm_neighborhood_analysis.py | 1 | 4687 | #!/usr/bin/env python
import os
import logging
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import pickle
def setLogger(fname, loglevel):
    """ Function to handle error logging """
    # Truncates (filemode='w') the log file at fname on every run.
    logging.basicConfig(filename=fname, filemode='w', level=loglevel, format='%(asctime)s - %(levelname)s - %(message)s')
def readData(fname):
    """ Importing a large DOT file is slow. This function will read a pickle
    file if available. If no pickle, then read DOT and create a pickle for next
    time. """
    pname = os.path.splitext(fname)[0] + ".gpickle"
    try:
        # If there is a pickle, unpickle it
        logging.info("Unpickling file")
        nxGraph = nx.Graph(nx.read_gpickle(pname))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed while falling back to the DOT import.
        logging.info("No Pickled file, will import DOT")
        try:
            # No pickle, try the dot file
            logging.info("Importing dot file")
            nxGraph = nx.Graph(nx.read_dot(fname))
            # Make pickle for next time
            logging.info("Pickle graph for later use.")
            nx.write_gpickle(nxGraph, pname)
        except Exception:
            logging.exception("Please provide a DOT formated file.")
            # Re-raise: the original fell through to the return below and
            # crashed with a NameError because nxGraph was never bound.
            raise
    return nxGraph
def cleanSet(geneList):
    """ Take a set of gene names, remove the isoform information and collapse genes names. """
    # Keep only the text before the first '_'; the gene 'fl' is special
    # cased back to its full name 'fl_2_d'.
    stems = (gene.split('_')[0] for gene in geneList)
    return set('fl_2_d' if stem == 'fl' else stem for stem in stems)
def getNeighbors(nxGraph, target, geneList, splicingFactors):
    """ Search the primary and secondary neighborhoods. Return neighbors and counts.

    Returns a row (list) matching the CSV header written in __main__:
    [target, flag_sex, flag_splice, num_primary, num_primary_sex,
     num_secondary, num_secondary_sex, num_possible_sex,
     primary_sex_genes, secondary_sex_genes].
    """
    # Pull primary neighbor from target
    primary = nxGraph.neighbors(target)
    # Pull secondary neighbors
    secondary = list()
    for target2 in primary:
        secondary.extend(nxGraph.neighbors(target2))
    # Calculate the number of primary and secondary neighbors that are in my
    # gene list of interest.
    primSet = set(primary)
    # NOTE: secSet is the union of primary and secondary neighbors (minus
    # the target), so "secondary" counts below include the primary ring.
    secSet = set([x for x in (primary + secondary) if x != target])  # remove the target gene from the secondary neighborhood.
    sexPrim = primSet & set(geneList)
    sexPrim2 = cleanSet(sexPrim)
    # Pipe-delimited gene names; set iteration order is unspecified.
    sexPrimString = '|'.join([str(x) for x in sexPrim2])
    sexSec = secSet & set(geneList)
    sexSec2 = cleanSet(sexSec)
    sexSecString = '|'.join([str(x) for x in sexSec2])
    numPrimary = len(primSet)
    numSecondary = len(secSet)
    numPrimSex = len(sexPrim)
    numSecSex = len(sexSec)
    numSex = len(geneList)
    # 1 if the target itself is a gene of interest.
    flag_sex = 0
    if target in geneList:
        flag_sex = 1
    # 1 if the target is a known splicing factor.
    flag_splice = 0
    if target in splicingFactors:
        flag_splice = 1
    return([target, flag_sex, flag_splice, numPrimary, numPrimSex, numSecondary, numSecSex, numSex, sexPrimString, sexSecString])
def writeHeader(headerList, handle):
    """Write a comma-separated header line to an open file handle."""
    handle.write(",".join(str(col) for col in headerList) + "\n")
def writeOutput(myOutList, handle):
    """Write one comma-separated data row to an open file handle."""
    handle.write(",".join(str(col) for col in myOutList) + "\n")
if __name__ == "__main__":
    # Hard-coded input/output locations on the lab share.
    lname = "/home/jfear/mclab/cegs_sem_sd_paper/analysis_output/ggm/dsrp_gene_level_neighborhood_analysis.log"
    dotname = "/home/jfear/mclab/cegs_sem_sd_paper/analysis_output/ggm/dsrp_ggm_gene_FDR2.dot"
    oname = "/home/jfear/mclab/cegs_sem_sd_paper/analysis_output/ggm/dsrp_gene_level_neighborhood_analysis.csv"

    # Turn on Logging if option --log was given
    setLogger(lname, logging.INFO)

    # Import Dot File
    mygraph = readData(dotname)

    # Create gene list by pulling all genes that don't start with 'CG'
    logging.info("Creating gene list")
    geneList = [x for x in mygraph.nodes_iter(data=False) if not x.startswith('CG')]

    # I am wanting to highlight splicing factors in my output graph
    logging.info("Creating splicing factor list")
    splicingFactors = ['vir', 'Rbp1', 'B52', 'sqd', 'Psi', 'mub', 'Rm62', 'snf', 'Spf45', 'ps']

    # Explore the nieghborhood and make CSV table and subplots
    logging.info("Finding neighbors and writing output")
    with open(oname, 'w') as OUT:
        myHeader = ["FG", "FG_sex", "FG_splice", "num_primary", "num_primary_sex", "num_secondary", "num_secondary_sex", "num_possible_sex", "primary_sex_det_genes", "secondary_sex_det_genes"]
        writeHeader(myHeader, OUT)
        # Iterate through all genes in Sex Det
        for node in mygraph.nodes_iter(data=False):
            # Calculate primary and secondary nearest neighbors
            myOut = getNeighbors(mygraph, node, geneList, splicingFactors)
            writeOutput(myOut, OUT)
    logging.info("Script Complete")
| lgpl-3.0 |
jlegendary/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 216 | 13290 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Module-level fixture shared by most tests below: 400 points on the line
# y = 0.2*x + 20 with three samples deliberately corrupted.

# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])

# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)

X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
    """RANSAC flags exactly the three corrupted samples as outliers."""
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)

    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, y)

    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
    """fit raises ValueError when is_data_valid rejects every subsample."""
    def is_data_valid(X, y):
        # Each candidate subsample has exactly min_samples rows.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    X = np.random.rand(10, 2)
    y = np.random.rand(10, 1)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       is_data_valid=is_data_valid,
                                       random_state=0)

    assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
    """fit raises ValueError when is_model_valid rejects every model."""
    def is_model_valid(estimator, X, y):
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       is_model_valid=is_model_valid,
                                       random_state=0)

    assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
    """max_trials=0 is rejected; n_trials_ only exists after fitting."""
    base_estimator = LinearRegression()

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, max_trials=0,
                                       random_state=0)
    assert_raises(ValueError, ransac_estimator.fit, X, y)

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, max_trials=11,
                                       random_state=0)
    assert getattr(ransac_estimator, 'n_trials_', None) is None
    ransac_estimator.fit(X, y)
    assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
    """The search stops after the first trial once stop_n_inliers is met."""
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, stop_n_inliers=2,
                                       random_state=0)
    ransac_estimator.fit(X, y)

    assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
    """The search stops after the first trial once stop_score is exceeded."""
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, stop_score=0,
                                       random_state=0)
    ransac_estimator.fit(X, y)

    assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
    """score() is perfect on the inliers and degraded on the two outliers."""
    X = np.arange(100)[:, None]
    y = np.zeros((100, ))
    y[0] = 1
    y[1] = 100

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.5, random_state=0)
    ransac_estimator.fit(X, y)

    assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
    assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
    """predict() follows the inlier model (all zeros), ignoring outliers."""
    X = np.arange(100)[:, None]
    y = np.zeros((100, ))
    y[0] = 1
    y[1] = 100

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.5, random_state=0)
    ransac_estimator.fit(X, y)

    assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
    """residual_threshold=0.0 yields no inliers and a descriptive ValueError."""
    # When residual_threshold=0.0 there are no inliers and a
    # ValueError with a message should be raised
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.0, random_state=0)

    # Raw string so the regex escape \. reaches the regex engine verbatim
    # instead of relying on Python passing unknown string escapes through
    # (a DeprecationWarning on modern Python).
    assert_raises_regexp(ValueError,
                         r"No inliers.*residual_threshold.*0\.0",
                         ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
    """Fitting on a COO sparse matrix finds the same three outliers."""
    X_sparse = sparse.coo_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
    """Fitting on a CSR sparse matrix finds the same three outliers."""
    X_sparse = sparse.csr_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
    """Fitting on a CSC sparse matrix finds the same three outliers."""
    X_sparse = sparse.csc_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
    """Passing None as the estimator behaves like a LinearRegression base."""
    base_estimator = LinearRegression()

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)

    ransac_estimator.fit(X, y)
    ransac_none_estimator.fit(X, y)

    assert_array_almost_equal(ransac_estimator.predict(X),
                              ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
    """min_samples accepts absolute ints or fractions; bad values raise.

    Estimators 1/2/5/6 are equivalent parameterizations; 3 (negative),
    4 (float > 1 that is not an int count) and 7 (more than n_samples)
    must raise ValueError at fit time.
    """
    base_estimator = LinearRegression()
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator2 = RANSACRegressor(base_estimator,
                                        min_samples=2. / X.shape[0],
                                        residual_threshold=5, random_state=0)
    ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
                                        residual_threshold=5, random_state=0)
    ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
                                        residual_threshold=5, random_state=0)
    ransac_estimator6 = RANSACRegressor(base_estimator,
                                        residual_threshold=5, random_state=0)
    ransac_estimator7 = RANSACRegressor(base_estimator,
                                        min_samples=X.shape[0] + 1,
                                        residual_threshold=5, random_state=0)

    ransac_estimator1.fit(X, y)
    ransac_estimator2.fit(X, y)
    ransac_estimator5.fit(X, y)
    ransac_estimator6.fit(X, y)

    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator2.predict(X))
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator5.predict(X))
    assert_array_almost_equal(ransac_estimator1.predict(X),
                              ransac_estimator6.predict(X))

    assert_raises(ValueError, ransac_estimator3.fit, X, y)
    assert_raises(ValueError, ransac_estimator4.fit, X, y)
    assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
    """RANSAC handles multi-output (2-D) targets and still finds the outliers."""
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)

    # 3-D target values
    yyy = np.column_stack([y, y, y])

    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, yyy)

    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
    """Custom residual metrics give the same fit as the default on this data."""
    # Named functions instead of assigned lambdas (PEP 8 E731).
    def residual_metric1(dy):
        # Sum of absolute residuals per sample.
        return np.sum(np.abs(dy), axis=1)

    def residual_metric2(dy):
        # Sum of squared residuals per sample.
        return np.sum(dy ** 2, axis=1)

    yyy = np.column_stack([y, y, y])

    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric2)

    # multi-dimensional
    ransac_estimator0.fit(X, yyy)
    ransac_estimator1.fit(X, yyy)
    ransac_estimator2.fit(X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))

    # one-dimensional
    ransac_estimator0.fit(X, y)
    ransac_estimator2.fit(X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
    """With no explicit threshold, the default still isolates the outliers."""
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       random_state=0)

    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, y)

    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
    """_dynamic_max_trials reproduces the textbook trial counts.

    Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
    Hartley, R.~I. and Zisserman, A., 2004,
    Multiple View Geometry in Computer Vision, Second Edition,
    Cambridge University Press, ISBN: 0521540518
    """
    # e = 0%, min_samples = X
    assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)

    # e = 5%, min_samples = 2
    assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
    # e = 10%, min_samples = 2
    assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
    # e = 30%, min_samples = 2
    assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
    # e = 50%, min_samples = 2
    assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)

    # e = 5%, min_samples = 8
    assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
    # e = 10%, min_samples = 8
    assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
    # e = 30%, min_samples = 8
    assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
    # e = 50%, min_samples = 8
    assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)

    # e = 0%, min_samples = 10
    assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
    assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))

    # stop_probability outside [0, 1] must be rejected at fit time.
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       stop_probability=-0.1)
    assert_raises(ValueError, ransac_estimator.fit, X, y)

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       stop_probability=1.1)
    assert_raises(ValueError, ransac_estimator.fit, X, y)
| bsd-3-clause |
liangz0707/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
# Fixed seed so the sampled data (and the plots) are reproducible.
np.random.seed(0)

n_samples = 30
degrees = [1, 4, 15]


def true_fun(X):
    # Named function instead of an assigned lambda (PEP 8 E731); this is
    # the ground-truth function the polynomial models try to approximate.
    return np.cos(1.5 * np.pi * X)

X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1

plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
    ax = plt.subplot(1, len(degrees), i + 1)
    plt.setp(ax, xticks=(), yticks=())

    polynomial_features = PolynomialFeatures(degree=degrees[i],
                                             include_bias=False)
    linear_regression = LinearRegression()
    pipeline = Pipeline([("polynomial_features", polynomial_features),
                         ("linear_regression", linear_regression)])
    pipeline.fit(X[:, np.newaxis], y)

    # Evaluate the models using crossvalidation
    scores = cross_validation.cross_val_score(pipeline,
                                              X[:, np.newaxis], y,
                                              scoring="mean_squared_error",
                                              cv=10)

    X_test = np.linspace(0, 1, 100)
    plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
    plt.plot(X_test, true_fun(X_test), label="True function")
    plt.scatter(X, y, label="Samples")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.xlim((0, 1))
    plt.ylim((-2, 2))
    plt.legend(loc="best")
    # scores are negative MSE, hence the sign flip in the title.
    plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
        degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
bmazin/SDR | Projects/NewDataPacket/test_packet.py | 1 | 2865 | #!/bin/usr/python
import numpy as np
import matplotlib.pyplot as plt
import struct
import sys
from bin import *
# Python 2 analysis script: decode pulse packets from two binary dumps and
# cross-check them against a raw phase timestream.
# NOTE(review): bin12_9ToDeg, peakfit and castBin come from the project's
# `bin` module (star-imported above) — semantics assumed, not verified here.
bin_data_0=str(np.load('bin_data_0.npy'))
bin_data_1=str(np.load('bin_data_1.npy'))
phase_timestream=np.loadtxt('phase_timestream.txt')
# Each packet word is 4 bytes; addr0/addr1 bound the region to decode.
bin_max=len(bin_data_1)/4
addr0=969
addr1=3132
median = -0.0414051531
threshold = -1.10506996019
pulseMask = int(12*'1',2) #bitmask of 12 ones
timeMask = int(20*'1',2)#bitmask of 20 ones
N_pts = len(phase_timestream)
total_counts = addr1-addr0
phase_peaks = []
time_vals=[]
sec=0
print 'total_counts ',total_counts
n=addr0
elapsed = 0
initial_time = 0
#while elapsed < 4048:
# Decode one 64-bit packet per 4-byte pair; channel id lives in the top byte.
for n in range(addr0,addr1):#range(addr1,bin_max)+range(0,addr0):
    raw_data_1 = int(struct.unpack('>L', bin_data_1[n*4:n*4+4])[0])
    raw_data_0 = int(struct.unpack('>L', bin_data_0[n*4:n*4+4])[0])
    packet = (np.int64(raw_data_1)<<32)+np.int64(raw_data_0)
    channel = np.int64(packet)>>56
    if channel == 51:
        # Three 12-bit phase samples around the pulse peak, then a
        # 20-bit timestamp in the low bits.
        beforePeak = bin12_9ToDeg(packet>>44 & pulseMask)
        atPeak = bin12_9ToDeg(packet>>32 & pulseMask)
        afterPeak = bin12_9ToDeg(packet>>20 & pulseMask)
        peak = peakfit(beforePeak, atPeak, afterPeak)
        time = packet & timeMask
        if initial_time == 0:
            initial_time = time
        elapsed = time - initial_time
        phase_peaks.append(atPeak)
        # Timestamps are in microseconds.
        timestamp=elapsed*1e-6
        time_vals.append(timestamp)
        print beforePeak,atPeak,afterPeak
    else:
        #packet is End of second (0xfffffffff)
        sec+=1
print len(time_vals)
# Second pass: locate pulses directly in the raw phase timestream and
# reproduce the firmware's peak fit and low-pass baseline computation.
last_peak = -1
beforePeakList=[]
atPeakList=[]
afterPeakList=[]
paraPeakList=[]
timeList = []
baseList = []
alpha = 0.1
for iPt,pt in enumerate(phase_timestream):
    # Trigger on an upward crossing below threshold, with a 100-sample
    # dead time after each detected pulse.
    if pt < threshold and pt > phase_timestream[iPt-1] and (last_peak == -1 or iPt - last_peak > 100):
        last_peak = iPt
        beforePeak = phase_timestream[iPt-2]
        atPeak = phase_timestream[iPt-1]
        afterPeak = phase_timestream[iPt]
        para=peakfit(beforePeak,atPeak,afterPeak)
        para = castBin(para,format='deg')
        time=(iPt-1)*1e-6
        # Baseline filter restarts at each 2048-sample block boundary.
        startBase = (iPt/2048)*2048
        lpf = phase_timestream[startBase]
        alpha = castBin(alpha,format='rad')
        alphaC = castBin(1-alpha,format='rad')
        # Single-pole IIR low-pass, quantized like the firmware
        # (16 bits, binary point 12), stopping 5 samples before the pulse.
        for jPt in np.arange(startBase,iPt-5):
            lpf0 = castBin(alphaC * lpf,format='deg',nBits=16,binaryPoint=12)
            lpf1 = castBin(alpha * phase_timestream[jPt],format='deg',nBits=16,binaryPoint=12)
            lpf = lpf0+lpf1
            lpf = castBin(lpf0+lpf1,format='deg')
        baseList.append(lpf)
        beforePeakList.append(beforePeak)
        atPeakList.append(atPeak)
        afterPeakList.append(afterPeak)
        paraPeakList.append(para)
        timeList.append(time)
        print iPt,para,beforePeak,atPeak,afterPeak,lpf
total_timestream_counts = len(paraPeakList)
print total_timestream_counts
| gpl-2.0 |
ychfan/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 72 | 12865 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
# Feature dimensionality of the two toy datasets used by the input fns below.
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
  """Input fn yielding Boston features (epoch-limited) and (n, 1) targets."""
  boston = base.load_boston()
  features = input_lib.limit_epochs(
      array_ops.reshape(
          constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
      num_epochs=num_epochs)
  labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
  return features, labels
def boston_input_fn_with_queue(num_epochs=None):
  """Same Boston input fn, but also registers a minimal queue runner so
  queue-runner handling code paths get exercised."""
  features, labels = boston_input_fn(num_epochs=num_epochs)

  # Create a minimal queue runner.
  fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
  queue_runner = queue_runner_impl.QueueRunner(fake_queue,
                                               [constant_op.constant(0)])
  queue_runner_impl.add_queue_runner(queue_runner)

  return features, labels
def iris_input_fn():
  """Input fn yielding iris features and flat (n,) integer class labels."""
  iris = base.load_iris()
  features = array_ops.reshape(
      constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
  labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
  return features, labels
def iris_input_fn_labels_dict():
  """Iris input function returning labels wrapped in a {'labels': ...} dict.

  Used to test label_key-based MetricSpec handling.
  """
  iris = base.load_iris()
  features = array_ops.reshape(
      constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
  labels = {
      'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
  }
  return features, labels
def boston_eval_fn():
  """Eval input function: the Boston set concatenated with itself.

  Returns 2 * n_examples rows so evaluation sees a deterministic,
  fixed-size batch.
  """
  boston = base.load_boston()
  n_examples = len(boston.target)
  features = array_ops.reshape(
      constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
  labels = array_ops.reshape(
      constant_op.constant(boston.target), [n_examples, 1])
  return array_ops.concat([features, features], 0), array_ops.concat(
      [labels, labels], 0)
def extract(data, key):
  """Return `data[key]` if `data` is a dict, otherwise `data` unchanged.

  Asserts that `key` is present whenever `data` actually is a dict.
  """
  if not isinstance(data, dict):
    return data
  assert key in data
  return data[key]
def linear_model_params_fn(features, labels, mode, params):
  """Linear regression model_fn whose learning rate comes from `params`.

  Returns the legacy (prediction, loss, train_op) tuple form.
  """
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss,
      variables.get_global_step(),
      optimizer='Adagrad',
      learning_rate=params['learning_rate'])
  return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
  """Zero-initialized linear regression model_fn (tuple return form)."""
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  if isinstance(features, dict):
    # Unwrap a single-entry feature dict into its sole tensor.
    (_, features), = features.items()
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return model_fn.ModelFnOps(
      mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
  """Logistic regression model_fn without a `mode` argument.

  Labels are one-hot encoded to 3 classes; predictions expose both the
  argmax 'class' and the raw 'prob' distribution.
  """
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  labels = array_ops.one_hot(labels, 3, 1, 0)
  prediction, loss = (models.logistic_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return {
      'class': math_ops.argmax(prediction, 1),
      'prob': prediction
  }, loss, train_op
# Sample newline-delimited vocabulary fixtures; presumably consumed by other
# tests in this file (usage not visible in this chunk).
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
  """Tests feeding `Estimator` via x/y arrays, dicts, and input functions."""

  def testContinueTrainingDictionaryInput(self):
    """Training can resume from a checkpoint dir with dict-valued x/y."""
    boston = base.load_boston()
    output_dir = tempfile.mkdtemp()
    est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
    boston_input = {'input': boston.data}
    float64_target = {'labels': boston.target.astype(np.float64)}
    est.fit(x=boston_input, y=float64_target, steps=50)
    scores = est.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    del est
    # Create another estimator object with the same output dir.
    est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
    # Check we can evaluate and predict.
    scores2 = est2.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    self.assertAllClose(scores2['MSE'], scores['MSE'])
    predictions = np.array(list(est2.predict(x=boston_input)))
    other_score = _sklearn.mean_squared_error(predictions,
                                              float64_target['labels'])
    self.assertAllClose(other_score, scores['MSE'])

  def testBostonAll(self):
    """SKCompat fit/score/predict agree with an sklearn-computed MSE."""
    boston = base.load_boston()
    est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    float64_labels = boston.target.astype(np.float64)
    est.fit(x=boston.data, y=float64_labels, steps=100)
    scores = est.score(
        x=boston.data,
        y=float64_labels,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    predictions = np.array(list(est.predict(x=boston.data)))
    other_score = _sklearn.mean_squared_error(predictions, boston.target)
    self.assertAllClose(scores['MSE'], other_score)
    self.assertTrue('global_step' in scores)
    self.assertEqual(100, scores['global_step'])

  def testBostonAllDictionaryInput(self):
    """Same as testBostonAll but with dict-valued x and y."""
    boston = base.load_boston()
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston_input = {'input': boston.data}
    float64_target = {'labels': boston.target.astype(np.float64)}
    est.fit(x=boston_input, y=float64_target, steps=100)
    scores = est.evaluate(
        x=boston_input,
        y=float64_target,
        metrics={'MSE': metric_ops.streaming_mean_squared_error})
    predictions = np.array(list(est.predict(x=boston_input)))
    other_score = _sklearn.mean_squared_error(predictions, boston.target)
    self.assertAllClose(other_score, scores['MSE'])
    self.assertTrue('global_step' in scores)
    self.assertEqual(scores['global_step'], 100)

  def testIrisAll(self):
    """Classification via SKCompat: class/prob outputs are consistent."""
    iris = base.load_iris()
    est = estimator.SKCompat(
        estimator.Estimator(model_fn=logistic_model_no_mode_fn))
    est.fit(iris.data, iris.target, steps=100)
    scores = est.score(
        x=iris.data,
        y=iris.target,
        metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
    predictions = est.predict(x=iris.data)
    predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
    self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
    self.assertAllClose(predictions['class'], predictions_class)
    # 'class' must be the argmax over the predicted distribution.
    self.assertAllClose(
        predictions['class'], np.argmax(
            predictions['prob'], axis=1))
    other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
    self.assertAllClose(scores['accuracy'], other_score)
    self.assertTrue('global_step' in scores)
    self.assertEqual(100, scores['global_step'])

  def testIrisAllDictionaryInput(self):
    """Classification with dict-valued x/y; predict yields per-example dicts."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    iris_data = {'input': iris.data}
    iris_target = {'labels': iris.target}
    est.fit(iris_data, iris_target, steps=100)
    scores = est.evaluate(
        x=iris_data,
        y=iris_target,
        metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
    predictions = list(est.predict(x=iris_data))
    predictions_class = list(est.predict(x=iris_data, outputs=['class']))
    self.assertEqual(len(predictions), iris.target.shape[0])
    classes_batch = np.array([p['class'] for p in predictions])
    self.assertAllClose(classes_batch,
                        np.array([p['class'] for p in predictions_class]))
    self.assertAllClose(
        classes_batch,
        np.argmax(
            np.array([p['prob'] for p in predictions]), axis=1))
    other_score = _sklearn.accuracy_score(iris.target, classes_batch)
    self.assertAllClose(other_score, scores['accuracy'])
    self.assertTrue('global_step' in scores)
    self.assertEqual(scores['global_step'], 100)

  def testIrisInputFn(self):
    """fit/evaluate/predict all work when fed through an input_fn."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    est.fit(input_fn=iris_input_fn, steps=100)
    _ = est.evaluate(input_fn=iris_input_fn, steps=1)
    predictions = list(est.predict(x=iris.data))
    self.assertEqual(len(predictions), iris.target.shape[0])

  def testIrisInputFnLabelsDict(self):
    """Dict-valued labels from an input_fn resolve via MetricSpec label_key."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
    _ = est.evaluate(
        input_fn=iris_input_fn_labels_dict,
        steps=1,
        metrics={
            'accuracy':
                metric_spec.MetricSpec(
                    metric_fn=metric_ops.streaming_accuracy,
                    prediction_key='class',
                    label_key='labels')
        })
    predictions = list(est.predict(x=iris.data))
    self.assertEqual(len(predictions), iris.target.shape[0])

  def testTrainInputFn(self):
    """Smoke test: train and evaluate with separate input functions."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    _ = est.evaluate(input_fn=boston_eval_fn, steps=1)

  def testPredictInputFn(self):
    """predict(input_fn=...) stops after one epoch of the data."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    input_fn = functools.partial(boston_input_fn, num_epochs=1)
    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0])

  def testPredictInputFnWithQueue(self):
    """predict drains two epochs when a queue runner is registered."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0] * 2)

  def testPredictConstInputFn(self):
    """predict works with a closure input_fn built from constants."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)

    def input_fn():
      features = array_ops.reshape(
          constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
      labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
      return features, labels

    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
giorgiop/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 55 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)

# Authors: Clay Woolam <clay@woolam.org>
#          Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD

import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles

# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
# -1 marks unlabeled samples; exactly one point per circle gets a label
# (shuffle=False guarantees index 0 is on the outer circle and -1 on the
# inner one).
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner

###############################################################################
# Learn with LabelSpreading
# alpha=1.0 means labels are fully re-estimated from neighbors at each step.
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)

###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
# Left panel: the raw data with the two seed labels.
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
            marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
            marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
            marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")

# Right panel: labels inferred by the transduction.
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
            marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
            marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")

plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
muthujothi/CrowdAnalytix-CrimeRates-PredictiveModelling | selectFeatures.py | 1 | 1038 | import pandas as pd
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt
import csv
from scipy.stats.stats import pearsonr
from collections import OrderedDict
from collections import defaultdict

# Load the train data (Python 2 script; local absolute path --
# NOTE(review): consider making this path configurable).
df_1 = pd.read_csv('C:/Pst Files/CrowdAnalytix/CrimeRates/CA_Crime_Rate_MungedData_Train.csv')
# Column 2 is the robbery-rate target.
df_robbery_rate = df_1.ix[:, 2]
# Square-root transform of the last column before correlating.
#df_blacks = np.sqrt(df_1.ix[:,-1])
df_blacks = np.sqrt(df_1.ix[:, -1])
# Pearson correlation between the transformed feature and the target.
print pearsonr(df_blacks.values.reshape(-1, 1), df_robbery_rate.values.reshape(-1, 1))[0][0]
#print df_robbery_rate.name #get the name of the column like this

# Disabled exploration code: rank every feature column (7..119) by its
# Pearson correlation with the robbery rate.
'''
feature_correlation = {}
for i in range (7, 120):
    df_feature = df_1.ix[:,i]
    ft_name = df_feature.name
    ft_corr = round(((pearsonr(df_feature.values.reshape(-1,1), df_robbery_rate.values.reshape(-1,1)))[0])[0], 4)
    feature_correlation[ft_name] = ft_corr
robbery_corr = OrderedDict(sorted(feature_correlation.items(), key=lambda t: (t[1])))
for k, v in robbery_corr.items():
    print k + " ***** " + str(v)
'''
| mit |
schae234/gingivere | tests/lr.py | 2 | 1543 | import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import StratifiedKFold
import numpy as np
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
store = pd.HDFStore("D:/gingivere/data.h5")
df = store['master']
X = []
y = []
for name in df.file:
if 'Dog_1' in name and not 'test' in name:
print(name)
name = name.split('.')[0]
data = store[name]
data = data.astype('float64')
data = preprocessing.scale(data)
for row in data:
X = X + np.array_split(row, 6)
if 'interictal' in name:
y = y + [0]*6
elif 'preictal' in name:
y = y + [1]*6
store.close()
X = np.asarray(X)
r = (X.min(), X.max())
xx = []
for x in X:
xx.append(np.histogram(x, density=True, range=r)[0])
print(xx[-1])
X = np.asarray(xx)
y = np.asarray(y)
# clf = LinearRegression()
clf = RandomForestClassifier(n_estimators=20)
# clf = SVC(gamma=0.001, kernel='rbf', C=100)
skf = StratifiedKFold(y, n_folds=2)
for train_index, test_index in skf:
print("Detailed classification report:")
print()
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
clf.fit(X_train, y_train)
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(np.around(y_true), np.around(y_pred)))
print()
print(roc_auc_score(y_true, y_pred))
print()
| mit |
huazhisong/graduate_text | src/contrib_cnn/cnn_shallow.py | 1 | 10681 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from gensim.models.word2vec import KeyedVectors
import numpy as np
import pandas
from sklearn import metrics, grid_search
import tensorflow as tf
FLAGS = None  # populated by argparse in the __main__ block

# Model / preprocessing hyperparameters.
MAX_DOCUMENT_LENGTH = 100  # documents padded/truncated to this many tokens
EMBEDDING_SIZE = 300       # matches GoogleNews word2vec dimensionality
N_FILTERS = 10             # convolution filters per window size
POOLING_WINDOW = 2
POOLING_STRIDE = 1
n_words = 0                # set in main() after fitting the vocabulary
MAX_LABEL = 15             # number of DBpedia classes
WORDS_FEATURE = 'words'  # Name of the input words feature.
l2_loss = 1.0              # L2 regularization scale (0/falsy disables it)
drop_out = 0.0             # dropout *rate*; 0.0 makes dropout a no-op
k = 1                      # k for the recall@k / precision@k eval metrics
filter_list = [3, 4, 5]    # convolution window heights (in tokens)
# One of: 'random', 'static', 'train_static', 'multiple_static'.
embedding_type = 'random'
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
    MAX_DOCUMENT_LENGTH)
embedding_file = '../../data/data_by_ocean/GoogleNews-vectors-negative300.bin'
pretrained_embedding = None  # filled with the word2vec matrix in main()
def print_operation(t):
    """Debug helper: log a tensor's op name and its static shape."""
    op_name = t.op.name
    static_shape = t.get_shape().as_list()
    print(op_name, ' ', static_shape)
def conception_layer(net, filter_lists, kw, n_filters=100):
    """Parallel conv + max-pool branches concatenated (inception-style).

    Args:
        net: 4-D input tensor `[batch, MAX_DOCUMENT_LENGTH, kw, channels]`.
        filter_lists: iterable of convolution window heights, e.g. [3, 4, 5].
        kw: kernel width, i.e. the embedding size each filter spans.
        n_filters: number of convolution filters per branch.

    Returns:
        2-D tensor `[batch, n_filters * len(filter_lists)]`.
    """
    outputs = []
    for window_size in filter_lists:
        print_operation(net)
        with tf.name_scope("convolution_%s" % window_size):
            conv = tf.layers.conv2d(
                net,
                # Bug fix: use the `n_filters` argument instead of the
                # module-level N_FILTERS constant. Otherwise the final
                # reshape (sized by n_filters) mismatches the actual
                # channel count whenever a caller passes n_filters != N_FILTERS.
                filters=n_filters,
                kernel_size=[window_size, kw],
                padding='VALID',
                # Add a ReLU for non linearity.
                activation=tf.nn.relu)
            # Max pooling across output of Convolution+Relu.
            print_operation(conv)
            pooling = tf.layers.max_pooling2d(
                conv,
                pool_size=(MAX_DOCUMENT_LENGTH - window_size + 1, 1),
                strides=POOLING_STRIDE,
                padding='VALID')
            print_operation(pooling)
            outputs.append(pooling)
    # Concatenate all branches along the channel axis and flatten.
    num_filters_total = n_filters * len(filter_lists)
    net = tf.concat(outputs, 3)
    print_operation(net)
    return tf.reshape(net, [-1, num_filters_total])
def get_pretrained_embedding(vocabulary_processor):
    """Build an [n_words, EMBEDDING_SIZE] matrix initialized from word2vec.

    Words missing from the pretrained vectors keep their uniform random
    initialization. Reads the module-level `n_words` and `embedding_file`.
    """
    print("Load word2vec file {}\n".format(embedding_file))
    initW = np.random.uniform(-0.25, 0.25, (n_words, EMBEDDING_SIZE))
    word_vectors = KeyedVectors.load_word2vec_format(embedding_file, binary=True)
    for word in word_vectors.vocab:
        idx = vocabulary_processor.vocabulary_.get(word)
        # idx == 0 presumably means "not in vocabulary" (id 0 is the
        # processor's unknown token) -- those words keep the random init.
        if idx != 0:
            initW[idx] = word_vectors[word]
    return initW
def cnn_model(features, labels, mode):
    """Estimator model_fn: shallow text CNN over word embeddings.

    Args:
        features: dict holding `WORDS_FEATURE` -> int word-id tensor of
            shape [batch, MAX_DOCUMENT_LENGTH].
        labels: int class ids in [0, MAX_LABEL), or None in PREDICT mode.
        mode: a `tf.estimator.ModeKeys` value.

    Returns:
        A `tf.estimator.EstimatorSpec` for the requested mode.
    """
    # Convert indexes of words into embeddings.
    # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
    # maps word indexes of the sequence into [batch_size, sequence_length,
    # EMBEDDING_SIZE].
    with tf.name_scope("embedding"):
        word_vectors = None
        if embedding_type == 'random':
            # Randomly initialized, trainable embedding table.
            word_vectors = tf.contrib.layers.embed_sequence(
                features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
            word_vectors = tf.expand_dims(word_vectors, -1)
        elif embedding_type == 'static':
            # Pretrained word2vec, frozen (trainable=False).
            word_vectors = tf.Variable(tf.constant(0.0, shape=[n_words, EMBEDDING_SIZE]),
                                       trainable=False, name="W")
            word_vectors = word_vectors.assign(pretrained_embedding)
            word_vectors = tf.nn.embedding_lookup(word_vectors, features[WORDS_FEATURE])
            word_vectors = tf.expand_dims(word_vectors, -1)
        elif embedding_type == 'train_static':
            # Pretrained word2vec, fine-tuned during training.
            word_vectors = tf.Variable(tf.constant(0.0, shape=[n_words, EMBEDDING_SIZE]),
                                       trainable=True, name="W")
            word_vectors = word_vectors.assign(pretrained_embedding)
            word_vectors = tf.nn.embedding_lookup(word_vectors, features[WORDS_FEATURE])
            word_vectors = tf.expand_dims(word_vectors, -1)
        elif embedding_type == 'multiple_static':
            # Two channels: one frozen copy and one fine-tuned copy.
            static_embedding = tf.Variable(tf.constant(0.0, shape=[n_words, EMBEDDING_SIZE]),
                                           trainable=False, name="W")
            static_embedding = static_embedding.assign(pretrained_embedding)
            static_words = tf.nn.embedding_lookup(static_embedding, features[WORDS_FEATURE])
            static_words = tf.expand_dims(static_words, -1)
            none_static_embedding = tf.Variable(tf.constant(0.0, shape=[n_words, EMBEDDING_SIZE]),
                                                trainable=True, name="W")
            none_static_embedding = none_static_embedding.assign(pretrained_embedding)
            none_static_words = tf.nn.embedding_lookup(none_static_embedding, features[WORDS_FEATURE])
            none_static_words = tf.expand_dims(none_static_words, -1)
            word_vectors = tf.concat([static_words, none_static_words], 3)
        else:
            # NOTE(review): returning None here will crash the Estimator
            # later with an opaque error; raising ValueError would be clearer.
            print("embedding type error")
            return
    with tf.name_scope("layer_1"):
        # Parallel conv branches over the embedded document.
        net = conception_layer(word_vectors, filter_list, EMBEDDING_SIZE, N_FILTERS)
        print_operation(net)
    if mode == tf.estimator.ModeKeys.TRAIN:
        with tf.name_scope("dropout"):
            # `drop_out` is the drop *rate*; the module default 0.0 is a no-op.
            net = tf.layers.dropout(net, drop_out)
    with tf.name_scope("output"):
        # Apply regular WX + B and classification
        if l2_loss:
            # L2-regularize both kernel and bias of the output layer.
            logits = tf.layers.dense(
                net,
                MAX_LABEL,
                kernel_regularizer=tf.contrib.layers.l2_regularizer(l2_loss),
                bias_regularizer=tf.contrib.layers.l2_regularizer(l2_loss),
                activation=None)
        else:
            logits = tf.layers.dense(
                net, MAX_LABEL, activation=None)
        print_operation(logits)
    with tf.name_scope("prediction"):
        predicted_classes = tf.argmax(logits, 1)
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(
                mode=mode,
                predictions={
                    'class': predicted_classes,
                    'prob': tf.nn.softmax(logits)
                })
    one_hot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=one_hot_labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        optimizer = tf.train.AdadeltaOptimizer(learning_rate=0.01)
        train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
    # EVAL mode: the *_at_k metrics need int64 labels and raw logits.
    labels = tf.cast(labels, tf.int64)
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(
            labels=labels, predictions=predicted_classes),
        'recall_k': tf.metrics.recall_at_k(
            labels=labels, predictions=logits, k=k),
        'average_precision_k': tf.metrics.sparse_average_precision_at_k(
            labels=labels, predictions=logits, k=k),
        'precision_k': tf.metrics.sparse_precision_at_k(
            labels=labels, predictions=logits, k=k)
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
    """Train the text CNN on DBpedia and report accuracy/recall/precision."""
    global n_words
    # Prepare training and testing data
    dbpedia = tf.contrib.learn.datasets.load_dataset(
        'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
    x_train = pandas.DataFrame(dbpedia.train.data)[1]
    y_train = pandas.Series(dbpedia.train.target)
    x_test = pandas.DataFrame(dbpedia.test.data)[1]
    y_test = pandas.Series(dbpedia.test.target)
    # Process vocabulary
    global vocab_processor
    x_train = np.array(list(vocab_processor.fit_transform(x_train)))
    x_test = np.array(list(vocab_processor.transform(x_test)))
    n_words = len(vocab_processor.vocabulary_)
    print('Total words: %d' % n_words)
    # Load word2vec weights for the fitted vocabulary (used by the
    # non-'random' embedding_type branches of cnn_model).
    global pretrained_embedding
    pretrained_embedding = get_pretrained_embedding(vocab_processor)
    # Build model
    classifier = tf.estimator.Estimator(model_fn=cnn_model)
    # Train.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={WORDS_FEATURE: x_train},
        y=y_train,
        # batch_size equal to the whole training set => full-batch steps.
        batch_size=len(x_train),
        num_epochs=None,
        shuffle=True)
    classifier.train(input_fn=train_input_fn, steps=100)
    # Predict.
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={WORDS_FEATURE: x_test},
        y=y_test,
        num_epochs=1,
        shuffle=False)
    predictions = classifier.predict(input_fn=test_input_fn)
    y_predicted = np.array(list(p['class'] for p in predictions))
    y_predicted = y_predicted.reshape(np.array(y_test).shape)
    # Score with sklearn.
    score = metrics.accuracy_score(y_test, y_predicted)
    print('Accuracy (sklearn): {0:f}'.format(score))
    recall = metrics.recall_score(y_test, y_predicted, average='weighted')
    print('Recall (sklearn): {0:f}'.format(recall))
    precision = metrics.precision_score(y_test, y_predicted, average='micro')
    print('precision (sklearn): {0:f}'.format(precision))
    # Score with tensorflow.
    scores = classifier.evaluate(input_fn=test_input_fn)
    print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
    print('recall_k (tensorflow): {0:f}'.format(scores['recall_k']))
    print('precision_k (tensorflow): {0:f}'.format(scores['precision_k']))
    print('average_precision_k (tensorflow): {0:f}'.format(scores['average_precision_k']))
# Script entry point: parse known flags, hand the rest to tf.app.run.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--test_with_fake_data',
        default=False,
        help='Test the example code with fake data.',
        action='store_true')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| agpl-3.0 |
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pydocstyle/src/tests/test_cases/canonical_numpy_examples.py | 3 | 5315 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
# Example source file from the official "numpydoc docstring guide"
# documentation (with the modification of commenting out all the original
# ``import`` lines, plus adding this note and ``Expectation`` code):
# * As HTML: https://numpydoc.readthedocs.io/en/latest/example.html
# * Source Python:
# https://github.com/numpy/numpydoc/blob/master/doc/example.py
# from __future__ import division, absolute_import, print_function
#
# import os # standard library imports first
#
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
#
# import numpy as np
# import matplotlib as mpl
# import matplotlib.pyplot as plt
#
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
import os
from .expected import Expectation
expectation = Expectation()
# `expect` registers an expected pydocstyle violation for the decorated
# definition. The message strings below are fixtures and must match
# pydocstyle's output verbatim -- do not reword them.
expect = expectation.expect

# module docstring expected violations:
expectation.expected.add((
    os.path.normcase(__file__),
    "D205: 1 blank line required between summary line and description "
    "(found 0)"))
expectation.expected.add((
    os.path.normcase(__file__),
    "D213: Multi-line docstring summary should start at the second line"))
expectation.expected.add((
    os.path.normcase(__file__),
    "D400: First line should end with a period (not 'd')"))
expectation.expected.add((
    os.path.normcase(__file__),
    "D404: First word of the docstring should not be `This`"))
expectation.expected.add((
    os.path.normcase(__file__),
    "D415: First line should end with a period, question mark, or exclamation "
    "point (not 'd')"))
# NOTE: `foo` is a pydocstyle test fixture reproducing the canonical numpydoc
# example; its docstring is the *subject under test* (the @expect decorators
# declare exactly which violations it must trigger) and must not be edited.
@expect("D213: Multi-line docstring summary should start at the second line",
        arg_count=3)
@expect("D401: First line should be in imperative mood; try rephrasing "
        "(found 'A')", arg_count=3)
@expect("D413: Missing blank line after last section ('Examples')",
        arg_count=3)
def foo(var1, var2, long_var_name='hi'):
    r"""A one-line summary that does not use variable names.

    Several sentences providing an extended description. Refer to
    variables using back-ticks, e.g. `var`.

    Parameters
    ----------
    var1 : array_like
        Array_like means all those objects -- lists, nested lists, etc. --
        that can be converted to an array. We can also refer to
        variables like `var1`.
    var2 : int
        The type above can either refer to an actual Python type
        (e.g. ``int``), or describe the type of the variable in more
        detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
        Choices in brackets, default first when optional.

    Returns
    -------
    type
        Explanation of anonymous return value of type ``type``.
    describe : type
        Explanation of return value named `describe`.
    out : type
        Explanation of `out`.
    type_without_description

    Other Parameters
    ----------------
    only_seldom_used_keywords : type
        Explanation
    common_parameters_listed_above : type
        Explanation

    Raises
    ------
    BadException
        Because you shouldn't have done that.

    See Also
    --------
    numpy.array : Relationship (optional).
    numpy.ndarray : Relationship (optional), which could be fairly long, in
                    which case the line wraps here.
    numpy.dot, numpy.linalg.norm, numpy.eye

    Notes
    -----
    Notes about the implementation algorithm (if needed).

    This can have multiple paragraphs.

    You may include some math:

    .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}

    And even use a Greek symbol like :math:`\omega` inline.

    References
    ----------
    Cite the relevant literature, e.g. [1]_. You may also cite these
    references in the notes section above.

    .. [1] O. McNoleg, "The integration of GIS, remote sensing,
       expert systems and adaptive co-kriging for environmental habitat
       modelling of the Highland Haggis using object-oriented, fuzzy-logic
       and neural-network techniques," Computers & Geosciences, vol. 22,
       pp. 585-588, 1996.

    Examples
    --------
    These are written in doctest format, and should illustrate how to
    use the function.

    >>> a = [1, 2, 3]
    >>> print([x + 3 for x in a])
    [4, 5, 6]
    >>> print("a\nb")
    a
    b
    """
    # After closing class docstring, there should be one blank line to
    # separate following codes (according to PEP257).
    # But for function, method and module, there should be no blank lines
    # after closing the docstring.
    pass
| mit |
magicrub/MissionPlanner | Lib/site-packages/numpy/fft/fftpack.py | 59 | 39653 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
# Public API; the `refft*`/`irefft*` names are legacy aliases for the
# corresponding `rfft*` functions (presumably bound later in this module --
# their definitions are not visible in this chunk).
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
           'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
           'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
             work_function=fftpack.cfftf, fft_cache=_fft_cache):
    """Internal helper: apply an FFTPACK transform along one axis.

    Crops or zero-pads `a` to length `n` along `axis`, memoizes the
    FFTPACK twiddle-factor work array for size `n` in `fft_cache`, and
    runs `work_function` over the last axis.
    """
    a = asarray(a)
    if n is None:
        n = a.shape[axis]
    if n < 1:
        raise ValueError("Invalid number of FFT data points (%d) specified." % n)
    try:
        # Work arrays depend only on n, so reuse them across calls.
        wsave = fft_cache[n]
    except(KeyError):
        wsave = init_function(n)
        fft_cache[n] = wsave
    if a.shape[axis] != n:
        s = list(a.shape)
        if s[axis] > n:
            # Crop: keep the first n entries along `axis`.
            index = [slice(None)]*len(s)
            index[axis] = slice(0, n)
            a = a[index]
        else:
            # Zero-pad: copy the input into a larger zeroed array.
            index = [slice(None)]*len(s)
            index[axis] = slice(0, s[axis])
            s[axis] = n
            z = zeros(s, a.dtype.char)
            z[index] = a
            a = z
    # FFTPACK only transforms the last axis: swap, transform, swap back.
    if axis != -1:
        a = swapaxes(a, axis, -1)
    r = work_function(a, wsave)
    if axis != -1:
        r = swapaxes(r, axis, -1)
    return r
def fft(a, n=None, axis=-1):
    """
    Compute the one-dimensional discrete Fourier Transform.

    This function computes the one-dimensional *n*-point discrete Fourier
    Transform (DFT) with the efficient Fast Fourier Transform (FFT)
    algorithm [CT].

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.
        If `n` is smaller than the length of the input, the input is cropped.
        If it is larger, the input is padded with zeros.  If `n` is not given,
        the length of the input (along the axis specified by `axis`) is used.
    axis : int, optional
        Axis over which to compute the FFT.  If not given, the last axis is
        used.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.

    Raises
    ------
    IndexError
        if `axes` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft : for definition of the DFT and conventions used.
    ifft : The inverse of `fft`.
    fft2 : The two-dimensional FFT.
    fftn : The *n*-dimensional FFT.
    rfftn : The *n*-dimensional FFT of real input.
    fftfreq : Frequency bins for given FFT parameters.

    Notes
    -----
    FFT (Fast Fourier Transform) refers to a way the discrete Fourier
    Transform (DFT) can be calculated efficiently, by using symmetries in the
    calculated terms.  The symmetry is highest when `n` is a power of 2, and
    the transform is therefore most efficient for these sizes.

    The DFT is defined, with the conventions used in this implementation, in
    the documentation for the `numpy.fft` module.

    References
    ----------
    .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
            machine calculation of complex Fourier series," *Math. Comput.*
            19: 297-301.

    Examples
    --------
    >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
    array([ -3.44505240e-16 +1.14383329e-17j,
             8.00000000e+00 -5.71092652e-15j,
             2.33482938e-16 +1.22460635e-16j,
             1.64863782e-15 +1.77635684e-15j,
             9.95839695e-17 +2.33482938e-16j,
             0.00000000e+00 +1.66837030e-15j,
             1.14383329e-17 +1.22460635e-16j,
            -1.64863782e-15 +1.77635684e-15j])

    >>> import matplotlib.pyplot as plt
    >>> t = np.arange(256)
    >>> sp = np.fft.fft(np.sin(t))
    >>> freq = np.fft.fftfreq(t.shape[-1])
    >>> plt.plot(freq, sp.real, freq, sp.imag)
    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
    >>> plt.show()

    In this example, real input has an FFT which is Hermitian, i.e., symmetric
    in the real part and anti-symmetric in the imaginary part, as described in
    the `numpy.fft` documentation.

    """
    # Delegate to the shared helper with the complex-transform work functions.
    return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
    """
    Compute the one-dimensional inverse discrete Fourier Transform.

    Inverts `fft`: ``ifft(fft(a)) == a`` to within numerical accuracy.
    The input is expected in the ordering produced by `fft`: ``a[0]``
    holds the zero-frequency term, ``a[1:n/2+1]`` the positive
    frequencies and ``a[n/2+1:]`` the negative frequencies.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.  The input is
        cropped or zero-padded along `axis` to this length; defaults to
        the input length.  Padding appends zeros at the end, which may
        give surprising results -- pad beforehand if another scheme is
        wanted.
    axis : int, optional
        Axis over which to compute the inverse DFT (default: the last).

    Returns
    -------
    out : complex ndarray
        The cropped/padded input transformed along `axis`.

    See Also
    --------
    numpy.fft : Definitions and conventions used.
    fft : The forward transform, of which `ifft` is the inverse.
    ifft2, ifftn : Two- and n-dimensional inverse FFTs.
    """
    a = asarray(a).astype(complex)
    if n is None:
        n = shape(a)[axis]
    # FFTPACK's backward transform is unnormalized; dividing by n gives
    # the conventional inverse.
    unscaled = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
    return unscaled / n
def rfft(a, n=None, axis=-1):
    """
    Compute the one-dimensional discrete Fourier Transform of real input.

    For real input the full DFT is Hermite-symmetric (negative-frequency
    terms are the conjugates of the positive ones), so only the
    non-negative frequencies are computed and returned; the transformed
    axis has length ``n/2 + 1``.

    Parameters
    ----------
    a : array_like
        Input array.  Any imaginary part is silently discarded.
    n : int, optional
        Number of input points to use along `axis`; the input is cropped
        or zero-padded to this length.  Defaults to the input length.
    axis : int, optional
        Axis over which to compute the FFT (default: the last).

    Returns
    -------
    out : complex ndarray
        The transformed input; the transformed axis has ``n/2 + 1``
        entries.  ``out[0]`` (and, for even `n`, ``out[-1]``) is purely
        real by symmetry.

    See Also
    --------
    numpy.fft : Definitions and conventions used.
    irfft : The inverse of `rfft`.
    fft : The FFT of general (complex) input.
    rfftn : The n-dimensional FFT of real input.
    """
    # Real input is required: cast to float before handing off to the
    # real-transform FFTPACK routines.
    data = asarray(a).astype(float)
    return _raw_fft(data, n, axis, fftpack.rffti, fftpack.rfftf,
                    _real_fft_cache)
def irfft(a, n=None, axis=-1):
    """
    Compute the inverse of the n-point DFT of real input.

    Inverts `rfft`: ``irfft(rfft(a), len(a)) == a`` to within numerical
    accuracy.  The input is expected in `rfft` order -- the real
    zero-frequency term followed by the positive-frequency terms -- and
    the negative frequencies are taken to be the conjugates of the
    positive ones (Hermite symmetry).

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the *output*; ``n/2 + 1``
        input points are consumed (cropping or zero-padding as needed).
        If not given, ``n = 2*(m - 1)`` where `m` is the input length
        along `axis` -- so an odd output length must be requested
        explicitly.
    axis : int, optional
        Axis over which to compute the inverse FFT (default: the last).

    Returns
    -------
    out : ndarray
        Real array of length `n` along the transformed axis.

    See Also
    --------
    numpy.fft : Definitions and conventions used.
    rfft : The forward transform, of which `irfft` is the inverse.
    irfft2, irfftn : Two- and n-dimensional counterparts.

    Notes
    -----
    Cropping/padding happens at the high frequencies, so
    ``irfft(rfft(a), m)`` resamples a series to `m` points by Fourier
    interpolation.
    """
    a = asarray(a).astype(complex)
    if n is None:
        # m stored points correspond to 2*(m-1) real output points.
        n = (shape(a)[axis] - 1) * 2
    # FFTPACK's backward real transform is unnormalized; divide by n.
    unscaled = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
                        _real_fft_cache)
    return unscaled / n
def hfft(a, n=None, axis=-1):
    """
    Compute the FFT of a signal whose spectrum has Hermitian symmetry.

    `hfft`/`ihfft` are the mirror image of `rfft`/`irfft`: here the
    signal is real in the *frequency* domain and Hermite-symmetric in
    the time domain.  As with `irfft`, an odd-length result must be
    requested explicitly: ``ihfft(hfft(a), len(a)) == a`` to within
    numerical accuracy.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        The length of the FFT.  Defaults to ``2*(m - 1)`` where `m` is
        the input length along `axis`.
    axis : int, optional
        The axis over which to compute the FFT, assuming Hermitian
        symmetry of the spectrum.  Default is the last axis.

    Returns
    -------
    out : ndarray
        The transformed input.

    See Also
    --------
    rfft : The one-dimensional FFT for real input.
    ihfft : The inverse of `hfft`.
    """
    a = asarray(a).astype(complex)
    if n is None:
        n = (shape(a)[axis] - 1) * 2
    # Conjugating and running the (1/n-scaled) inverse real transform,
    # then multiplying by n, yields the unnormalized forward transform.
    unscaled = irfft(conjugate(a), n, axis)
    return unscaled * n
def ihfft(a, n=None, axis=-1):
    """
    Compute the inverse FFT of a signal with a Hermitian spectrum.

    The counterpart of `hfft`; see there for the relationship to
    `rfft`/`irfft`.  ``ihfft(hfft(a), len(a)) == a`` to within
    numerical accuracy.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        Length of the inverse FFT.  Defaults to the input length along
        `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT, assuming Hermitian
        symmetry of the spectrum.  Default is the last axis.

    Returns
    -------
    out : ndarray
        The transformed input.

    See Also
    --------
    hfft, irfft
    """
    a = asarray(a).astype(float)
    length = shape(a)[axis] if n is None else n
    # The conjugate of the scaled forward real transform gives the
    # inverse of `hfft`.
    return conjugate(rfft(a, length, axis)) / length
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
    """
    Apply a one-dimensional transform `function` once per entry of
    `axes`, last axis first, after normalizing `s`/`axes` via
    `_cook_nd_args`.  Shared driver for fftn/ifftn/fft2/ifft2.
    """
    a = asarray(a)
    s, axes = _cook_nd_args(a, s, axes)
    # Walk the axes back-to-front, transforming one axis at a time.
    for ii in reversed(range(len(axes))):
        a = function(a, n=s[ii], axis=axes[ii])
    return a
def fftn(a, s=None, axes=None):
    """
    Compute the N-dimensional discrete Fourier Transform.

    The transform is applied over every axis in `axes` (all axes by
    default) by repeated application of the one-dimensional `fft`.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape of each transformed axis (``s[i]`` plays the role of `n`
        in ``fft(x, n)``); along each axis the input is cropped or
        zero-padded to match.  Defaults to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last
        ``len(s)`` axes, or all axes if `s` is not given either.
        Repeating an index transforms that axis more than once.

    Returns
    -------
    out : complex ndarray
        The cropped/padded input, transformed along `axes`.  Each
        transformed axis carries the zero-frequency term first, then the
        positive frequencies, then the negative frequencies, as for
        `fft`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.

    See Also
    --------
    numpy.fft : Definitions and conventions used.
    ifftn : The inverse of `fftn`.
    fft : The one-dimensional FFT.
    fft2 : The two-dimensional FFT.
    rfftn : The n-dimensional FFT of real input.
    fftshift : Shifts zero-frequency terms to the centre of the array.
    """
    return _raw_fftnd(a, s, axes, fft)
def ifftn(a, s=None, axes=None):
    """
    Compute the N-dimensional inverse discrete Fourier Transform.

    Inverts `fftn`: ``ifftn(fftn(a)) == a`` to within numerical
    accuracy.  The input should be ordered as `fftn` returns it (zero
    frequency in the low-order corner of every transformed axis).

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape of each transformed axis (``s[i]`` plays the role of `n`
        in ``ifft(x, n)``); along each axis the input is cropped or
        zero-padded to match.  Defaults to the input shape along `axes`.
        Zero-padding appends zeros, which may give surprising results --
        pad beforehand if another scheme is wanted.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  Defaults to the
        last ``len(s)`` axes, or all axes if `s` is not given either.
        Repeating an index inverse-transforms that axis more than once.

    Returns
    -------
    out : complex ndarray
        The cropped/padded input, inverse-transformed along `axes`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.

    See Also
    --------
    numpy.fft : Definitions and conventions used.
    fftn : The forward transform, of which `ifftn` is the inverse.
    ifft : The one-dimensional inverse FFT.
    ifft2 : The two-dimensional inverse FFT.
    ifftshift : Undoes `fftshift`.
    """
    return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
    """
    Compute the 2-dimensional discrete Fourier Transform.

    This is `fftn` with a different default for `axes`: by default the
    transform is computed over the last two axes of the input.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape of each transformed axis (``s[i]`` plays the role of `n`
        in ``fft(x, n)``); along each axis the input is cropped or
        zero-padded to match.  Defaults to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT; the last two by default.  A
        repeated index transforms that axis more than once; a
        one-element sequence gives a one-dimensional FFT.

    Returns
    -------
    out : complex ndarray
        The cropped/padded input, transformed along `axes`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.

    See Also
    --------
    numpy.fft : Definitions and conventions used.
    ifft2 : The inverse two-dimensional FFT.
    fft : The one-dimensional FFT.
    fftn : The n-dimensional FFT (see there for details and examples).
    fftshift : Shifts zero-frequency terms to the centre of the array.
    """
    return _raw_fftnd(a, s, axes, fft)
def ifft2(a, s=None, axes=(-2,-1)):
    """
    Compute the 2-dimensional inverse discrete Fourier Transform.

    This is `ifftn` with a different default for `axes`: by default the
    inverse transform is computed over the last two axes of the input.
    ``ifft2(fft2(a)) == a`` to within numerical accuracy, and the input
    should be ordered as `fft2` returns it.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape of each transformed axis (``s[i]`` plays the role of `n`
        in ``ifft(x, n)``); along each axis the input is cropped or
        zero-padded to match.  Defaults to the input shape along `axes`.
        Zero-padding appends zeros; pad beforehand for another scheme.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT; the last two by
        default.  A repeated index transforms that axis more than once;
        a one-element sequence gives a one-dimensional inverse FFT.

    Returns
    -------
    out : complex ndarray
        The cropped/padded input, inverse-transformed along `axes`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.

    See Also
    --------
    numpy.fft : Definitions and conventions used.
    fft2 : The forward transform, of which `ifft2` is the inverse.
    ifftn : The n-dimensional inverse FFT (see there for details).
    ifft : The one-dimensional inverse FFT.
    """
    return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
    """
    Compute the N-dimensional discrete Fourier Transform of real input.

    The real transform (`rfft`) is performed over the last axis in
    `axes`, and ordinary complex transforms (`fft`) over the remaining
    axes.  By default all axes are transformed.

    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Length to use from the input along each transformed axis.  The
        final element corresponds to `n` in ``rfft(x, n)``; the rest to
        `n` in ``fft(x, n)``.  Along each axis the input is cropped or
        zero-padded to match.  Defaults to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last
        ``len(s)`` axes, or all axes if `s` is not given either.

    Returns
    -------
    out : complex ndarray
        The transformed input.  The last transformed axis has length
        ``s[-1]//2 + 1`` (as for `rfft`); the others follow `s` or keep
        the input length.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.

    See Also
    --------
    irfftn : The inverse of `rfftn`.
    rfft : The one-dimensional FFT of real input.
    fft, fftn : Complex one- and n-dimensional FFTs.
    rfft2 : The two-dimensional FFT of real input.
    """
    a = asarray(a).astype(float)
    s, axes = _cook_nd_args(a, s, axes)
    # Real transform along the final axis first, then complex transforms
    # over the remaining axes.
    a = rfft(a, s[-1], axes[-1])
    for length, ax in zip(s[:-1], axes[:-1]):
        a = fft(a, length, ax)
    return a
def rfft2(a, s=None, axes=(-2,-1)):
    """
    Compute the 2-dimensional FFT of a real array.

    This is `rfftn` with a different default for `axes`: the last two
    axes of the input.  See `rfftn` for details.

    Parameters
    ----------
    a : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.
    axes : sequence of ints, optional
        Axes over which to compute the FFT; the last two by default.

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    rfftn : The n-dimensional FFT of real input.
    """
    return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
    """
    Compute the inverse of the N-dimensional FFT of real input.

    Inverts `rfftn`: ``irfftn(rfftn(a), a.shape) == a`` to within
    numerical accuracy (``a.shape`` is needed for the same reason
    ``len(a)`` is needed by `irfft`).  The input should be ordered as
    `rfftn` returns it: `rfft` order along the final transformed axis
    and `fftn` order along the others.

    Parameters
    ----------
    a : array_like
        Input array.
    s : sequence of ints, optional
        Shape of the *output* along each transformed axis.  For every
        axis except the last, ``s[i]`` input points are used; along the
        last, ``s[-1]//2 + 1`` points are used.  The input is cropped or
        zero-padded accordingly.  If not given, the output length of the
        final axis is ``2*(m - 1)`` where `m` is the input length there,
        so an odd final length must be requested explicitly.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  Defaults to the
        last ``len(s)`` axes, or all axes if `s` is not given either.
        Repeating an index inverse-transforms that axis more than once.

    Returns
    -------
    out : ndarray
        Real array, cropped/padded per `s` and inverse-transformed along
        `axes`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.

    See Also
    --------
    rfftn : The forward transform, of which `irfftn` is the inverse.
    irfft : The one-dimensional inverse FFT of real input.
    irfft2 : The two-dimensional counterpart.
    """
    a = asarray(a).astype(complex)
    s, axes = _cook_nd_args(a, s, axes, invreal=1)
    # Complex inverse transforms over all axes but the last, then the
    # real inverse transform along the final axis.
    for length, ax in zip(s[:-1], axes[:-1]):
        a = ifft(a, length, ax)
    return irfft(a, s[-1], axes[-1])
def irfft2(a, s=None, axes=(-2,-1)):
    """
    Compute the 2-dimensional inverse FFT of a real array.

    This is `irfftn` with a different default for `axes`: the last two
    axes of the input.  See `irfftn` for details.

    Parameters
    ----------
    a : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse FFT; the last two by
        default.

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    irfftn : The inverse of the n-dimensional FFT of real input.
    """
    return irfftn(a, s, axes)
# Deprecated names
# The refft*/irefft* spellings predate the rfft*/irfft* naming.  Each alias
# is wrapped with numpy.deprecate so that calling the old name still works
# but directs users to the new name.
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
| gpl-3.0 |
ketjow4/NOV | Lib/site-packages/numpy/fft/fftpack.py | 59 | 39653 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
# Public names exported via ``import *``; the refft*/irefft* entries are
# deprecated aliases defined at the end of this module.
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
           'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
           'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
     take
import fftpack_lite as fftpack
# Caches mapping transform length -> FFTPACK work array ("wsave"): one for
# the complex transforms, one for the real transforms, so the setup cost is
# paid only once per distinct length.
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
             work_function=fftpack.cfftf, fft_cache = _fft_cache ):
    """
    One-dimensional transform driver shared by fft/ifft/rfft/irfft.

    Crops or zero-pads `a` to length `n` along `axis`, moves that axis
    to the end (the C work functions operate on the last axis only),
    applies `work_function`, and moves the axis back.  Work arrays
    produced by `init_function` are memoized per length in `fft_cache`.
    """
    a = asarray(a)
    if n is None:
        n = a.shape[axis]
    if n < 1:
        raise ValueError("Invalid number of FFT data points (%d) specified." % n)
    # Reuse the twiddle-factor setup for this length if we have it.
    if n in fft_cache:
        wsave = fft_cache[n]
    else:
        wsave = init_function(n)
        fft_cache[n] = wsave
    if a.shape[axis] != n:
        shp = list(a.shape)
        sel = [slice(None)] * len(shp)
        if shp[axis] > n:
            # Input too long: crop the axis down to n points.
            sel[axis] = slice(0, n)
            a = a[tuple(sel)]
        else:
            # Input too short: copy into a zero array of the right shape.
            sel[axis] = slice(0, shp[axis])
            shp[axis] = n
            padded = zeros(shp, a.dtype.char)
            padded[tuple(sel)] = a
            a = padded
    if axis != -1:
        a = swapaxes(a, axis, -1)
    r = work_function(a, wsave)
    if axis != -1:
        r = swapaxes(r, axis, -1)
    return r
def fft(a, n=None, axis=-1):
    """
    Compute the one-dimensional discrete Fourier Transform.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.  Shorter input is
        zero-padded and longer input is cropped; by default the length
        of `a` along `axis` is used.
    axis : int, optional
        Axis over which to compute the FFT (default: the last axis).

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axis`.

    See Also
    --------
    numpy.fft : For definition of the DFT and conventions used.
    ifft, fft2, fftn, rfftn, fftfreq
    """
    # All of the real work (caching, padding, axis handling) lives in
    # _raw_fft; this wrapper only selects the complex FFTPACK routines.
    return _raw_fft(a, n=n, axis=axis, init_function=fftpack.cffti,
                    work_function=fftpack.cfftf, fft_cache=_fft_cache)
def ifft(a, n=None, axis=-1):
    """
    Compute the one-dimensional inverse discrete Fourier Transform.

    ``ifft(fft(a)) == a`` to within numerical accuracy.  The input is
    expected in the standard `fft` ordering: zero-frequency term first,
    then positive frequencies, then negative frequencies in order of
    decreasingly negative frequency.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.  Shorter input is
        zero-padded by appending zeros (which may give surprising
        results); longer input is cropped.  Defaults to the input length
        along `axis`.
    axis : int, optional
        Axis over which to compute the inverse DFT (default: last axis).

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axis`.

    See Also
    --------
    numpy.fft, fft, ifft2, ifftn
    """
    a = asarray(a).astype(complex)
    if n is None:
        n = a.shape[axis]
    unscaled = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
    # The FFTPACK backward transform is unnormalized; dividing by n
    # makes the round trip through fft() the identity.
    return unscaled / n
def rfft(a, n=None, axis=-1):
    """
    Compute the one-dimensional discrete Fourier Transform for real input.

    Only the non-negative frequency terms are returned: for real input
    the negative-frequency terms are the complex conjugates of the
    positive-frequency ones and are therefore redundant.  Any imaginary
    part of `a` is silently discarded.

    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    n : int, optional
        Number of points along `axis` to use; the input is cropped or
        zero-padded to this length.  Defaults to the input length.
    axis : int, optional
        Axis over which to compute the FFT (default: the last axis).

    Returns
    -------
    out : complex ndarray
        Transformed input; the transformed axis has length ``n/2+1``.

    See Also
    --------
    numpy.fft, irfft, fft, fftn, rfftn
    """
    # astype(float) drops any imaginary component, as documented.
    a = asarray(a).astype(float)
    return _raw_fft(a, n=n, axis=axis, init_function=fftpack.rffti,
                    work_function=fftpack.rfftf, fft_cache=_real_fft_cache)
def irfft(a, n=None, axis=-1):
    """
    Compute the inverse of the n-point DFT for real input.

    ``irfft(rfft(a), len(a)) == a`` to within numerical accuracy.  The
    input holds only the non-negative frequency half of a
    Hermite-symmetric spectrum, as produced by `rfft`; the negative
    frequencies are taken to be the conjugates of the positive ones.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output; ``n/2+1`` input
        points are consumed (cropping or zero-padding as needed).  When
        not given, ``2*(m-1)`` is used, where `m` is the input length
        along `axis` -- so an odd output length can only be obtained by
        passing `n` explicitly.
    axis : int, optional
        Axis over which to compute the inverse FFT (default: last axis).

    Returns
    -------
    out : ndarray
        The real-valued inverse transform, of length `n` along `axis`.

    See Also
    --------
    numpy.fft, rfft, fft, irfft2, irfftn
    """
    a = asarray(a).astype(complex)
    if n is None:
        # Assume an even-length original signal: m half-spectrum points
        # correspond to 2*(m-1) time-domain samples.
        n = (a.shape[axis] - 1) * 2
    unscaled = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
                        _real_fft_cache)
    # The FFTPACK backward transform is unnormalized; divide by n.
    return unscaled / n
def hfft(a, n=None, axis=-1):
    """
    Compute the FFT of a signal whose spectrum has Hermitian symmetry.

    `hfft`/`ihfft` mirror `rfft`/`irfft` with the domains exchanged:
    here the signal is real in the frequency domain and
    Hermite-symmetric in the time domain, so
    ``ihfft(hfft(a), len(a)) == a`` within numerical accuracy, and an
    odd-length result requires `n` to be supplied.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        The length of the FFT; defaults to ``2*(m-1)`` where `m` is the
        input length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT (default: the last axis).

    Returns
    -------
    out : ndarray
        The transformed input.

    See Also
    --------
    rfft, ihfft
    """
    a = asarray(a).astype(complex)
    if n is None:
        n = (a.shape[axis] - 1) * 2
    # Conjugation plus a rescaled irfft implements the Hermitian FFT.
    return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
    """
    Compute the inverse FFT of a signal with Hermitian-symmetric spectrum.

    See `hfft` for how the `hfft`/`ihfft` pair relates to
    `rfft`/`irfft` (real frequency domain, Hermite-symmetric time
    domain).

    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    n : int, optional
        Length of the inverse FFT; defaults to the input length along
        `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT (default: last axis).

    Returns
    -------
    out : ndarray
        The transformed input.

    See Also
    --------
    hfft, irfft
    """
    a = asarray(a).astype(float)
    if n is None:
        n = a.shape[axis]
    spectrum = rfft(a, n, axis)
    # Conjugating the forward real transform and normalizing by n
    # gives the inverse Hermitian transform.
    return conjugate(spectrum) / n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
    """Apply a 1-D transform `function` successively over `axes`.

    The axes are processed from last to first, matching the historical
    ordering of the n-D FFTPACK wrappers.

    Parameters
    ----------
    a : array_like
        Input array.
    s, axes : optional
        Shape/axes arguments, normalized by `_cook_nd_args`.
    function : callable, optional
        One-dimensional transform applied along each axis (default:
        `fft`); called as ``function(a, n=..., axis=...)``.

    Returns
    -------
    ndarray
        The array transformed along every requested axis.
    """
    a = asarray(a)
    s, axes = _cook_nd_args(a, s, axes)
    # reversed() works for both lists and range objects; the old
    # "itl = range(...); itl.reverse()" idiom mutates a list and breaks
    # under Python 3, where range objects have no reverse() method.
    for ii in reversed(range(len(axes))):
        a = function(a, n=s[ii], axis=axes[ii])
    return a
def fftn(a, s=None, axes=None):
    """
    Compute the N-dimensional discrete Fourier Transform.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output; along
        each axis the input is cropped or zero-padded accordingly.
        Defaults to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last
        ``len(s)`` axes, or all axes when `s` is not given either.
        Repeated indices mean the transform over that axis is performed
        multiple times.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the
        requested axes.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft, ifftn, fft, rfftn, fft2, fftshift
    """
    # An N-D FFT is just the 1-D FFT applied along each requested axis.
    return _raw_fftnd(a, s, axes, function=fft)
def ifftn(a, s=None, axes=None):
    """
    Compute the N-dimensional inverse discrete Fourier Transform.

    ``ifftn(fftn(a)) == a`` to within numerical accuracy.  The input
    should be ordered as returned by `fftn` (zero frequency in the
    low-order corner of all axes).

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output; the
        input is cropped or zero-padded (by appending zeros, which may
        give surprising results) accordingly.  Defaults to the input
        shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  Defaults to the
        last ``len(s)`` axes, or all axes when `s` is not given either.
        Repeated indices mean the inverse transform over that axis is
        performed multiple times.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the
        requested axes.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft, fftn, ifft, ifft2, ifftshift
    """
    # The inverse N-D FFT is the 1-D inverse applied along each axis.
    return _raw_fftnd(a, s, axes, function=ifft)
def fft2(a, s=None, axes=(-2,-1)):
    """
    Compute the 2-dimensional discrete Fourier Transform.

    This is `fftn` with a default of the last two axes, i.e. a 2-D FFT
    over the trailing axes of the input.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape of the output along the transformed axes; the input is
        cropped or zero-padded as needed.
    axes : sequence of ints, optional
        Axes over which to compute the FFT (default: the last two).  A
        repeated index means that axis is transformed multiple times; a
        one-element sequence gives a one-dimensional FFT.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axes`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft, ifft2, fft, fftn, fftshift
    """
    return _raw_fftnd(a, s, axes, function=fft)
def ifft2(a, s=None, axes=(-2,-1)):
    """
    Compute the 2-dimensional inverse discrete Fourier Transform.

    This is `ifftn` with a default of the last two axes;
    ``ifft2(fft2(a)) == a`` to within numerical accuracy.  The input
    should be ordered as returned by `fft2`.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape of the output along the transformed axes; the input is
        cropped or zero-padded (by appending zeros) as needed.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT (default: the last
        two).  A repeated index means that axis is transformed multiple
        times; a one-element sequence gives a one-dimensional inverse
        FFT.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axes`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft, fft2, ifftn, ifft
    """
    return _raw_fftnd(a, s, axes, function=ifft)
def rfftn(a, s=None, axes=None):
    """
    Compute the N-dimensional discrete Fourier Transform for real input.

    The real transform (`rfft`) is performed over the last transformed
    axis, then ordinary complex transforms (`fft`) over the remaining
    axes; the output ordering follows those functions accordingly.

    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape to use from the input along each transformed axis; the
        input is cropped or zero-padded as needed.  The final element
        plays the role of `n` in ``rfft(x, n)``.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last
        ``len(s)`` axes, or all axes when `s` is not given either.

    Returns
    -------
    out : complex ndarray
        Transformed input; the last transformed axis has length
        ``s[-1]//2+1``, the others follow `s` or the input shape.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    irfftn, fft, rfft, fftn, rfft2
    """
    # Any imaginary component of the input is discarded here.
    a = asarray(a).astype(float)
    s, axes = _cook_nd_args(a, s, axes)
    # Real-input transform along the last requested axis first...
    a = rfft(a, s[-1], axes[-1])
    # ...then ordinary complex transforms along the leading axes, in
    # the same first-to-last order as the original index loop.
    for length, ax in zip(s[:-1], axes[:-1]):
        a = fft(a, length, ax)
    return a
def rfft2(a, s=None, axes=(-2,-1)):
    """
    Compute the 2-dimensional FFT of a real array.

    This is `rfftn` with a default of the last two axes; see `rfftn`
    for details.

    Parameters
    ----------
    a : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT along the transformed axes.
    axes : sequence of ints, optional
        Axes over which to compute the FFT (default: the last two).

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    rfftn
    """
    return rfftn(a, s=s, axes=axes)
def irfftn(a, s=None, axes=None):
    """
    Compute the inverse of the N-dimensional FFT of real input.

    ``irfftn(rfftn(a), a.shape) == a`` to within numerical accuracy;
    `a.shape` is needed for the same reason ``len(a)`` is for `irfft`.
    The input should be ordered as returned by `rfftn`: as for `irfft`
    along the final transformed axis and as for `ifftn` along the rest.

    Parameters
    ----------
    a : array_like
        Input array.
    s : sequence of ints, optional
        Shape of the output along each transformed axis; ``s[-1]//2+1``
        input points are used along the last axis.  The input is
        cropped or zero-padded as needed.  When not given, the last
        output axis defaults to ``2*(m-1)`` for input length `m`, so an
        odd final length requires `s`.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  Defaults to the
        last ``len(s)`` axes, or all axes when `s` is not given either.
        Repeated indices mean that axis is transformed multiple times.

    Returns
    -------
    out : ndarray
        The real-valued inverse transform along the requested axes.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    rfftn, fft, irfft, irfft2
    """
    a = asarray(a).astype(complex)
    s, axes = _cook_nd_args(a, s, axes, invreal=1)
    # Inverse complex transforms over the leading axes first, in the
    # same first-to-last order as the original index loop...
    for length, ax in zip(s[:-1], axes[:-1]):
        a = ifft(a, length, ax)
    # ...then the real-output inverse transform along the last axis.
    a = irfft(a, s[-1], axes[-1])
    return a
def irfft2(a, s=None, axes=(-2,-1)):
    """
    Compute the 2-dimensional inverse FFT of a real array.

    This is a thin convenience wrapper: it is `irfftn` restricted to two
    dimensions (the last two axes by default).

    Parameters
    ----------
    a : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse fft.
        Default is the last two axes.

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    irfftn : Compute the inverse of the N-dimensional FFT of real input.

    Notes
    -----
    For more details see `irfftn`.
    """
    # Delegate directly; keyword form makes the pass-through explicit.
    return irfftn(a, s=s, axes=axes)
# Deprecated names
# Legacy "refft*" spellings kept as aliases for backwards compatibility;
# calling them emits a DeprecationWarning pointing at the new name.
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
| gpl-3.0 |
Adai0808/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
    """Nearest centroid classifier.

    Each class is represented by its centroid, with test samples classified to
    the class with the nearest centroid.

    Read more in the :ref:`User Guide <nearest_centroid_classifier>`.

    Parameters
    ----------
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        The centroids for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class are minimized.
        If the "manhattan" metric is provided, this centroid is the median and
        for all other metrics, the centroid is now set to be the mean.

    shrink_threshold : float, optional (default = None)
        Threshold for shrinking centroids to remove features.

    Attributes
    ----------
    centroids_ : array-like, shape = [n_classes, n_features]
        Centroid of each class

    Examples
    --------
    >>> from sklearn.neighbors.nearest_centroid import NearestCentroid
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = NearestCentroid()
    >>> clf.fit(X, y)
    NearestCentroid(metric='euclidean', shrink_threshold=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier

    Notes
    -----
    When used for text classification with tf-idf vectors, this classifier is
    also known as the Rocchio classifier.

    References
    ----------
    Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
    multiple cancer types by shrunken centroids of gene expression. Proceedings
    of the National Academy of Sciences of the United States of America,
    99(10), 6567-6572. The National Academy of Sciences.
    """

    def __init__(self, metric='euclidean', shrink_threshold=None):
        self.metric = metric
        self.shrink_threshold = shrink_threshold

    def fit(self, X, y):
        """
        Fit the NearestCentroid model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
            Note that centroid shrinking cannot be used with sparse matrices.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        # If X is sparse and the metric is "manhattan", store it in a csc
        # format is easier to calculate the median.
        if self.metric == 'manhattan':
            X, y = check_X_y(X, y, ['csc'])
        else:
            X, y = check_X_y(X, y, ['csr', 'csc'])
        is_X_sparse = sp.issparse(X)
        # Shrinkage needs dense column arithmetic below, so reject sparse here.
        if is_X_sparse and self.shrink_threshold:
            raise ValueError("threshold shrinking not supported"
                             " for sparse input")
        n_samples, n_features = X.shape
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        self.classes_ = classes = le.classes_
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')

        # Mask mapping each class to it's members.
        self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of clusters in each class.
        nk = np.zeros(n_classes)

        for cur_class in range(n_classes):
            center_mask = y_ind == cur_class
            nk[cur_class] = np.sum(center_mask)
            if is_X_sparse:
                # Sparse matrices need integer (not boolean) row indexing.
                center_mask = np.where(center_mask)[0]

            # XXX: Update other averaging methods according to the metrics.
            if self.metric == "manhattan":
                # NumPy does not calculate median of sparse matrices.
                if not is_X_sparse:
                    self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
                else:
                    self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
            else:
                if self.metric != 'euclidean':
                    warnings.warn("Averaging for metrics other than "
                                  "euclidean and manhattan not supported. "
                                  "The average is set to be the mean."
                                  )
                self.centroids_[cur_class] = X[center_mask].mean(axis=0)

        if self.shrink_threshold:
            # Shrunken-centroid soft thresholding (Tibshirani et al. 2002).
            dataset_centroid_ = np.mean(X, axis=0)

            # m parameter for determining deviation
            m = np.sqrt((1. / nk) + (1. / n_samples))
            # Calculate deviation using the standard deviation of centroids.
            variance = (X - self.centroids_[y_ind]) ** 2
            variance = variance.sum(axis=0)
            s = np.sqrt(variance / (n_samples - n_classes))
            s += np.median(s)  # To deter outliers from affecting the results.
            mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
            ms = mm * s
            deviation = ((self.centroids_ - dataset_centroid_) / ms)
            # Soft thresholding: if the deviation crosses 0 during shrinking,
            # it becomes zero.
            signs = np.sign(deviation)
            deviation = (np.abs(deviation) - self.shrink_threshold)
            deviation[deviation < 0] = 0
            deviation *= signs
            # Now adjust the centroids using the deviation
            msd = ms * deviation
            self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
        return self

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]

        Notes
        -----
        If the metric constructor parameter is "precomputed", X is assumed to
        be the distance matrix between the data to be predicted and
        ``self.centroids_``.
        """
        check_is_fitted(self, 'centroids_')

        X = check_array(X, accept_sparse='csr')
        # Assign each sample the label of its closest centroid.
        return self.classes_[pairwise_distances(
            X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
adamhaney/airflow | setup.py | 1 | 12977 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import io
import logging
import os
import sys
import subprocess
logger = logging.getLogger(__name__)

# Kept manually in sync with airflow.__version__
version = imp.load_source(
    'airflow.version', os.path.join('airflow', 'version.py')).version

# True on Python 3 interpreters; used below to gate Python-2-only deps.
PY3 = sys.version_info[0] == 3

# README.md becomes the PyPI long description (markdown content type below).
with io.open('README.md', encoding='utf-8') as f:
    long_description = f.read()
class Tox(TestCommand):
    """setuptools ``test`` command that delegates the whole run to tox.

    Exposes ``--tox-args`` so extra flags can be forwarded, e.g.
    ``python setup.py test --tox-args="-e py36"``.
    """

    user_options = [('tox-args=', None, "Arguments to pass to tox")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.tox_args = ''

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import tox
        # Exit with tox's return code so CI sees failures.
        errno = tox.cmdline(args=self.tox_args.split())
        sys.exit(errno)
class CleanCommand(Command):
    """Custom clean command to tidy up the project root."""

    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # NOTE(review): shells out to POSIX `rm`; not portable to Windows.
        os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
    """
    Custom compile assets command to compile and build the frontend
    assets using npm and webpack.
    """

    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Requires npm to be installed; the script does the actual build.
        subprocess.call('./airflow/www/compile_assets.sh')
def git_version(version):
    """
    Return a version to identify the state of the underlying git repo. The version will
    indicate whether the head of the current git-backed working directory is tied to a
    release tag or not : it will indicate the former with a 'release:{version}' prefix
    and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
    branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
    changes are present.

    Returns an empty string when gitpython is missing or the repo can't be read.
    """
    repo = None
    try:
        # Lazy import: gitpython is only needed when building from a checkout.
        import git
        repo = git.Repo('.git')
    except ImportError:
        logger.warning('gitpython not found: Cannot compute the git version.')
        return ''
    except Exception as e:
        logger.warning('Cannot compute the git version. {}'.format(e))
        return ''
    if repo:
        sha = repo.head.commit.hexsha
        if repo.is_dirty():
            return '.dev0+{sha}.dirty'.format(sha=sha)
        # commit is clean
        return '.release:{version}+{sha}'.format(version=version, sha=sha)
    else:
        # NOTE(review): unreachable in practice — every failure path above
        # already returned; kept for byte-compatibility.
        return 'no_git_version'
def write_version(filename=os.path.join('airflow', 'git_version')):
    """Persist the computed git version string into *filename*.

    The file is read back at runtime so installed packages can report the
    exact commit they were built from.
    """
    content = "{}".format(git_version(version))
    with open(filename, 'w') as version_file:
        version_file.write(content)
# ---------------------------------------------------------------------------
# Per-integration dependency lists, referenced by ``extras_require`` below.
# ---------------------------------------------------------------------------
async_packages = [
    'greenlet>=0.4.9',
    'eventlet>= 0.9.7',
    'gevent>=0.13'
]
atlas = ['atlasclient>=0.1.2']
azure_blob_storage = ['azure-storage>=0.34.0']
azure_data_lake = [
    'azure-mgmt-resource==1.2.2',
    'azure-mgmt-datalake-store==0.4.0',
    'azure-datalake-store==0.0.19'
]
azure_cosmos = ['azure-cosmos>=3.0.1']
azure_container_instances = ['azure-mgmt-containerinstance']
cassandra = ['cassandra-driver>=3.13.0']
celery = [
    'celery>=4.1.1, <4.2.0',
    'flower>=0.7.3, <1.0'
]
cgroups = [
    'cgroupspy>=0.1.4',
]
# major update coming soon, clamp to 0.x
cloudant = ['cloudant>=0.5.9,<2.0']
crypto = ['cryptography>=0.9.3']
dask = [
    'distributed>=1.17.1, <2'
]
databricks = ['requests>=2.20.0, <3']
datadog = ['datadog>=0.14.0']
doc = [
    'mock',
    'sphinx>=1.2.3',
    'sphinx-argparse>=0.1.13',
    'sphinx-rtd-theme>=0.1.6',
    'sphinxcontrib-httpdomain>=1.7.0',
    'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker~=3.0']
druid = ['pydruid>=0.4.1']
elasticsearch = [
    'elasticsearch>=5.0.0,<6.0.0',
    'elasticsearch-dsl>=5.0.0,<6.0.0'
]
emr = ['boto3>=1.0.0, <1.8.0']
gcp_api = [
    'httplib2>=0.9.2',
    'google-api-python-client>=1.6.0, <2.0.0dev',
    'google-auth>=1.0.0, <2.0.0dev',
    'google-auth-httplib2>=0.0.1',
    'google-cloud-container>=0.1.1',
    'google-cloud-bigtable==0.31.0',
    'google-cloud-spanner>=1.7.1',
    'grpcio-gcp>=0.2.2',
    'PyOpenSSL',
    'pandas-gbq'
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
google_auth = ['Flask-OAuthlib>=0.9.1']
hdfs = ['snakebite>=2.7.8']
hive = [
    'hmsclient>=0.1.0',
    'pyhive>=0.6.0',
]
jdbc = ['jaydebeapi>=1.1.1']
jenkins = ['python-jenkins>=0.4.15']
jira = ['JIRA>1.0.7']
kerberos = ['pykerberos>=1.1.13',
            'requests_kerberos>=0.10.0',
            'thrift_sasl>=0.2.0',
            'snakebite[kerberos]>=2.7.8']
kubernetes = ['kubernetes>=3.0.0',
              'cryptography>=2.0.0']
ldap = ['ldap3>=2.5.1']
mssql = ['pymssql>=2.1.1']
mysql = ['mysqlclient>=1.3.6,<1.4']
oracle = ['cx_Oracle>=5.1.2']
password = [
    'bcrypt>=2.0.0',
    'flask-bcrypt>=0.7.1',
]
pinot = ['pinotdb>=0.1.1']
postgres = ['psycopg2>=2.7.4']
qds = ['qds-sdk>=1.10.4']
rabbitmq = ['librabbitmq>=1.6.1']
redis = ['redis>=2.10.5,<3.0.0']
s3 = ['boto3>=1.7.0, <1.8.0']
salesforce = ['simple-salesforce>=0.72']
samba = ['pysmbclient>=0.1.3']
segment = ['analytics-python>=1.2.9']
sendgrid = ['sendgrid>=5.2.0']
slack = ['slackclient>=1.0.0']
mongo = ['pymongo>=3.6.0']
snowflake = ['snowflake-connector-python>=1.5.2',
             'snowflake-sqlalchemy>=1.1.0']
ssh = ['paramiko>=2.1.1', 'pysftp>=0.2.9', 'sshtunnel>=0.1.4,<0.2']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
winrm = ['pywinrm==0.2.2']
zendesk = ['zdesk']

# Aggregate extras built from the individual lists above.
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant + druid + pinot \
    + cassandra + mongo
devel = [
    'click==6.7',
    'freezegun',
    'jira',
    'lxml>=4.0.0',
    'mock',
    'mongomock',
    'moto==1.3.5',
    'nose',
    'nose-ignore-docstring==0.2',
    'nose-timer',
    'parameterized',
    'paramiko',
    'pysftp',
    'pywinrm',
    'qds-sdk>=1.9.6',
    'rednose',
    'requests_mock',
    'flake8>=3.6.0',
]
if not PY3:
    devel += ['unittest2']
devel_minreq = devel + kubernetes + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_azure = devel_minreq + azure_data_lake + azure_cosmos
devel_all = (sendgrid + devel + all_dbs + doc + samba + s3 + slack + crypto + oracle +
             docker + ssh + kubernetes + celery + azure_blob_storage + redis + gcp_api +
             datadog + zendesk + jdbc + ldap + kerberos + password + webhdfs + jenkins +
             druid + pinot + segment + snowflake + elasticsearch + azure_data_lake + azure_cosmos +
             atlas + azure_container_instances)

# Snakebite & Google Cloud Dataflow are not Python 3 compatible :'(
if PY3:
    devel_ci = [package for package in devel_all if package not in
                ['snakebite>=2.7.8', 'snakebite[kerberos]>=2.7.8']]
else:
    devel_ci = devel_all
def do_setup():
    """Write the git version file, then invoke ``setuptools.setup``."""
    write_version()
    setup(
        name='apache-airflow',
        description='Programmatically author, schedule and monitor data pipelines',
        long_description=long_description,
        long_description_content_type='text/markdown',
        license='Apache License 2.0',
        version=version,
        packages=find_packages(exclude=['tests*']),
        package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
        include_package_data=True,
        zip_safe=False,
        scripts=['airflow/bin/airflow'],
        # Core runtime dependencies; extras below are opt-in integrations.
        install_requires=[
            'alembic>=0.9, <1.0',
            'bleach~=2.1.3',
            'configparser>=3.5.0, <3.6.0',
            'croniter>=0.3.17, <0.4',
            'dill>=0.2.2, <0.3',
            'enum34~=1.1.6',
            'flask>=0.12.4, <0.13',
            'flask-appbuilder==1.12.1',
            'flask-admin==1.5.2',
            'flask-caching>=1.3.3, <1.4.0',
            'flask-login>=0.3, <0.5',
            'flask-swagger==0.2.13',
            'flask-wtf>=0.14.2, <0.15',
            'funcsigs==1.0.0',
            'future>=0.16.0, <0.17',
            'gitpython>=2.0.2',
            'gunicorn>=19.4.0, <20.0',
            'iso8601>=0.1.12',
            'json-merge-patch==0.2',
            'jinja2>=2.7.3, <=2.10.0',
            'markdown>=2.5.2, <3.0',
            'pandas>=0.17.1, <1.0.0',
            'pendulum==1.4.4',
            'psutil>=4.2.0, <6.0.0',
            'pygments>=2.0.1, <3.0',
            'python-daemon>=2.1.1, <2.2',
            'python-dateutil>=2.3, <3',
            'python-nvd3==0.15.0',
            'requests>=2.20.0, <3',
            'setproctitle>=1.1.8, <2',
            'sqlalchemy>=1.1.15, <1.3.0',
            'tabulate>=0.7.5, <=0.8.2',
            'tenacity==4.8.0',
            'text-unidecode==1.2',  # Avoid GPL dependency, pip uses reverse order(!)
            'thrift>=0.9.2',
            'tzlocal>=1.4',
            'unicodecsv>=0.14.1',
            'werkzeug>=0.14.1, <0.15.0',
            'zope.deprecation>=4.0, <5.0',
        ],
        setup_requires=[
            'docutils>=0.14, <1.0',
        ],
        extras_require={
            'all': devel_all,
            'devel_ci': devel_ci,
            'all_dbs': all_dbs,
            'atlas': atlas,
            'async': async_packages,
            'azure_blob_storage': azure_blob_storage,
            'azure_data_lake': azure_data_lake,
            'azure_cosmos': azure_cosmos,
            'azure_container_instances': azure_container_instances,
            'cassandra': cassandra,
            'celery': celery,
            'cgroups': cgroups,
            'cloudant': cloudant,
            'crypto': crypto,
            'dask': dask,
            'databricks': databricks,
            'datadog': datadog,
            'devel': devel_minreq,
            'devel_hadoop': devel_hadoop,
            'devel_azure': devel_azure,
            'doc': doc,
            'docker': docker,
            'druid': druid,
            'elasticsearch': elasticsearch,
            'emr': emr,
            'gcp_api': gcp_api,
            'github_enterprise': github_enterprise,
            'google_auth': google_auth,
            'hdfs': hdfs,
            'hive': hive,
            'jdbc': jdbc,
            'jira': jira,
            'kerberos': kerberos,
            'kubernetes': kubernetes,
            'ldap': ldap,
            'mongo': mongo,
            'mssql': mssql,
            'mysql': mysql,
            'oracle': oracle,
            'password': password,
            'pinot': pinot,
            'postgres': postgres,
            'qds': qds,
            'rabbitmq': rabbitmq,
            'redis': redis,
            's3': s3,
            'salesforce': salesforce,
            'samba': samba,
            'sendgrid': sendgrid,
            'segment': segment,
            'slack': slack,
            'snowflake': snowflake,
            'ssh': ssh,
            'statsd': statsd,
            'vertica': vertica,
            'webhdfs': webhdfs,
            'winrm': winrm
        },
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'Intended Audience :: System Administrators',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.5',
            'Topic :: System :: Monitoring',
        ],
        author='Apache Software Foundation',
        author_email='dev@airflow.apache.org',
        url='http://airflow.apache.org/',
        download_url=(
            'https://dist.apache.org/repos/dist/release/airflow/' + version),
        cmdclass={
            'test': Tox,
            'extra_clean': CleanCommand,
            'compile_assets': CompileAssets
        },
        python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
    )


if __name__ == "__main__":
    do_setup()
| apache-2.0 |
pushpajnc/models | creating_customer_segments/renders.py | 1 | 4134 | import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import numpy as np
from sklearn.decomposition import pca
def pca_results(good_data, pca):
    '''
    Create a DataFrame of the PCA results
    Includes dimension feature weights and explained variance
    Visualizes the PCA results

    Parameters
    ----------
    good_data : pandas.DataFrame
        The data the PCA was fitted on (column names become feature labels).
    pca : fitted sklearn PCA object
        Must expose ``components_`` and ``explained_variance_ratio_``.

    Returns
    -------
    pandas.DataFrame with one row per dimension: explained variance ratio
    followed by the per-feature weights.
    '''

    # Dimension indexing (fixed: was a redundant ``dimensions = dimensions = ...``)
    dimensions = ['Dimension {}'.format(i) for i in range(1, len(pca.components_) + 1)]

    # PCA components
    components = pd.DataFrame(np.round(pca.components_, 4), columns = good_data.keys())
    components.index = dimensions

    # PCA explained variance
    ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1)
    variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance'])
    variance_ratios.index = dimensions

    # Create a bar plot visualization
    fig, ax = plt.subplots(figsize = (14,8))

    # Plot the feature weights as a function of the components
    components.plot(ax = ax, kind = 'bar');
    ax.set_ylabel("Feature Weights")
    ax.set_xticklabels(dimensions, rotation=0)

    # Display the explained variance ratios above each group of bars
    for i, ev in enumerate(pca.explained_variance_ratio_):
        ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n          %.4f"%(ev))

    # Return a concatenated DataFrame
    return pd.concat([variance_ratios, components], axis = 1)
def cluster_results(reduced_data, preds, centers, pca_samples):
    '''
    Visualizes the PCA-reduced cluster data in two dimensions
    Adds cues for cluster centers and student-selected sample data

    assumes reduced_data has columns 'Dimension 1'/'Dimension 2' — TODO confirm
    against the caller.
    '''

    predictions = pd.DataFrame(preds, columns = ['Cluster'])
    plot_data = pd.concat([predictions, reduced_data], axis = 1)

    # Generate the cluster plot
    fig, ax = plt.subplots(figsize = (14,8))

    # Color map
    cmap = cm.get_cmap('gist_rainbow')

    # Color the points based on assigned cluster
    for i, cluster in plot_data.groupby('Cluster'):
        cluster.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
                     color = cmap((i)*1.0/(len(centers)-1)), label = 'Cluster %i'%(i), s=30);

    # Plot centers with indicators (white circle with the cluster number inside)
    for i, c in enumerate(centers):
        ax.scatter(x = c[0], y = c[1], color = 'white', edgecolors = 'black', \
                   alpha = 1, linewidth = 2, marker = 'o', s=200);
        ax.scatter(x = c[0], y = c[1], marker='$%d$'%(i), alpha = 1, s=100);

    # Plot transformed sample points
    ax.scatter(x = pca_samples[:,0], y = pca_samples[:,1], \
               s = 150, linewidth = 4, color = 'black', marker = 'x');

    # Set plot title
    ax.set_title("Cluster Learning on PCA-Reduced Data - Centroids Marked by Number\nTransformed Sample Data Marked by Black Cross");
def channel_results(reduced_data, outliers, pca_samples):
    '''
    Visualizes the PCA-reduced cluster data in two dimensions using the full dataset
    Data is labeled by "Channel" and cues added for student-selected sample data

    Returns False when customers.csv cannot be read; otherwise returns None
    after drawing the plot.
    '''

    # Check that the dataset is loadable.
    # Fixed: a bare ``except:`` previously swallowed *every* exception
    # (including SystemExit/KeyboardInterrupt); narrowed to Exception, and
    # ``print(...)`` is valid on both Python 2 and 3 for a single argument.
    try:
        full_data = pd.read_csv("customers.csv")
    except Exception:
        print("Dataset could not be loaded. Is the file missing?")
        return False

    # Create the Channel DataFrame, dropping the rows flagged as outliers.
    channel = pd.DataFrame(full_data['Channel'], columns = ['Channel'])
    channel = channel.drop(channel.index[outliers]).reset_index(drop = True)
    labeled = pd.concat([reduced_data, channel], axis = 1)

    # Generate the cluster plot
    fig, ax = plt.subplots(figsize = (14,8))

    # Color map
    cmap = cm.get_cmap('gist_rainbow')

    # Color the points based on assigned Channel
    labels = ['Hotel/Restaurant/Cafe', 'Retailer']
    grouped = labeled.groupby('Channel')
    for i, channel in grouped:
        channel.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
                     color = cmap((i-1)*1.0/2), label = labels[i-1], s=30);

    # Plot transformed sample points (circled, numbered beside the marker)
    for i, sample in enumerate(pca_samples):
        ax.scatter(x = sample[0], y = sample[1], \
                   s = 200, linewidth = 3, color = 'black', marker = 'o', facecolors = 'none');
        ax.scatter(x = sample[0]+0.25, y = sample[1]+0.3, marker='$%d$'%(i), alpha = 1, s=125);

    # Set plot title
    ax.set_title("PCA-Reduced Data Labeled by 'Channel'\nTransformed Sample Data Circled");
| mit |
vsoch/nidmviewer | nidmviewer/sparql.py | 1 | 4830 | '''
sparql.py: part of the nidmviewer package
Sparql queries
Copyright (c) 2014-2018, Vanessa Sochat
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import rdflib
import rdfextras
rdfextras.registerplugins()
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
from pandas import DataFrame
def do_query(ttl_file, query, rdf_format="turtle", serialize_format="csv", output_df=True):
    """Run a SPARQL *query* against the RDF graph in *ttl_file*.

    Parameters
    ----------
    ttl_file : str
        Path to the RDF document (turtle by default).
    query : str
        SPARQL query string.
    rdf_format : str
        Format passed to ``rdflib.Graph.parse``.
    serialize_format : str
        Format passed to ``Result.serialize`` (csv by default).
    output_df : bool
        When True, parse the serialized result into a pandas DataFrame.

    Returns
    -------
    pandas.DataFrame when ``output_df`` is truthy, otherwise the serialized
    result (str/bytes).
    """
    g = rdflib.Graph()
    g.parse(ttl_file, format=rdf_format)
    result = g.query(query)
    # NOTE(review): rdflib normally returns a Result object, not None; the
    # message below is informational only.
    if result is None:
        print("No results matching query.")
    else:
        print("Found results matching query.")
    result = result.serialize(format=serialize_format)
    if output_df:
        if isinstance(result, bytes):
            result = result.decode('utf-8')
        result = StringIO(result)
        # Fixed: DataFrame.from_csv was deprecated in pandas 0.21 and removed
        # in 1.0. read_csv with index_col=0, parse_dates=True reproduces its
        # defaults.
        from pandas import read_csv
        return read_csv(result, sep=",", index_col=0, parse_dates=True)
    return result
def get_coordinates(ttl_file):
    """Return peak coordinates from a NIDM-Results turtle file.

    Selects every nidm:NIDM_0000015 (Coordinate) node together with the peak
    that points at it: label, coordinate vector, equivalent z-score and
    uncorrected p-value. Result is the DataFrame produced by ``do_query``.
    """
    query = """
    SELECT DISTINCT ?name ?coordinate ?z_score ?peak_name ?pvalue_uncorrected
    WHERE {?coord a nidm:NIDM_0000015 ;
    rdfs:label ?name ;
    nidm:NIDM_0000086 ?coordinate .
    ?peak prov:atLocation ?coord ;
    nidm:NIDM_0000092 ?z_score ;
    rdfs:label ?peak_name ;
    nidm:NIDM_0000116 ?pvalue_uncorrected .}
    ORDER BY ?name
    """
    return do_query(ttl_file,query)
def get_coordinates_and_maps(ttl_file):
    """Return statistic maps, excursion-set maps and (optionally) their peaks.

    Peaks are OPTIONAL in the query, so statistic maps without any suprathreshold
    peak still appear in the result with empty peak columns.
    """
    query = """
    PREFIX nidm: <http://purl.org/nidash/nidm#>
    PREFIX prov: <http://www.w3.org/ns/prov#>
    prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>
    prefix spm: <http://purl.org/nidash/spm#>
    prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    prefix peak: <http://purl.org/nidash/nidm#NIDM_0000062>
    prefix significant_cluster: <http://purl.org/nidash/nidm#NIDM_0000070>
    prefix coordinate: <http://purl.org/nidash/nidm#NIDM_0000086>
    prefix equivalent_zstatistic: <http://purl.org/nidash/nidm#NIDM_0000092>
    prefix pvalue_fwer: <http://purl.org/nidash/nidm#NIDM_0000115>
    prefix pvalue_uncorrected: <http://purl.org/nidash/nidm#NIDM_0000116>
    prefix statistic_map: <http://purl.org/nidash/nidm#NIDM_0000076>
    prefix statistic_type: <http://purl.org/nidash/nidm#NIDM_0000123>
    prefix nidm_ExcursionSetMap: <http://purl.org/nidash/nidm#NIDM_0000025>
    SELECT DISTINCT ?statmap ?excsetmap_location ?statmap_type ?z_score
    ?pvalue_uncorrected ?coordinate_id ?coord_name ?coordinate ?exc_set
    WHERE {
    ?statmap a statistic_map: ;
    statistic_type: ?statmap_type .
    ?exc_set a nidm_ExcursionSetMap: ;
    prov:wasGeneratedBy/prov:used ?statmap ;
    prov:atLocation ?excsetmap_location .
    OPTIONAL {
    ?peak prov:wasDerivedFrom/prov:wasDerivedFrom/prov:wasGeneratedBy/prov:used ?statmap ;
    prov:atLocation ?coord ;
    equivalent_zstatistic: ?z_score ;
    pvalue_uncorrected: ?pvalue_uncorrected ;
    prov:atLocation ?coordinate_id .
    ?coordinate_id rdfs:label ?coord_name ;
    coordinate: ?coordinate .
    }
    }
    """
    return do_query(ttl_file,query)
| bsd-3-clause |
YinongLong/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
# Shuffle the iris samples with a fixed seed so the class labels are
# interleaved deterministically for the tests below.
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]

# Sparse copy of the data; indices sorted for deterministic behaviour.
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
    """Minimal reference perceptron used to cross-check sklearn's Perceptron.

    Stores the learned weight vector in ``w`` and the intercept in ``b``
    (both are inspected directly by the correctness test below).
    """

    def __init__(self, n_iter=1):
        self.n_iter = n_iter

    def fit(self, X, y):
        """Run ``n_iter`` full passes of the classic perceptron update rule."""
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0

        for _ in range(self.n_iter):
            # Update only on misclassified samples: w += y_i * x_i, b += y_i.
            for sample, target in zip(X, y):
                if self.predict(sample)[0] != target:
                    self.w += target * sample
                    self.b += target

    def project(self, X):
        """Signed distance (up to scaling) of each row of X from the hyperplane."""
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        """Return the sign of the projection for each (2-D-promoted) sample."""
        return np.sign(self.project(np.atleast_2d(X)))
def test_perceptron_accuracy():
    # Both the dense and the sparse representations must reach a reasonable
    # training accuracy on the shuffled iris data (no shuffling inside fit,
    # so the run is deterministic).
    for data in (X, X_csr):
        clf = Perceptron(n_iter=30, shuffle=False)
        clf.fit(data, y)
        score = clf.score(data, y)
        assert_true(score >= 0.7)
def test_perceptron_correctness():
    # With shuffling disabled, sklearn's Perceptron must learn exactly the
    # same weight vector as the plain reference implementation above on a
    # binarised (1 vs rest) version of the labels.
    y_bin = y.copy()
    y_bin[y != 1] = -1

    clf1 = MyPerceptron(n_iter=2)
    clf1.fit(X, y_bin)

    clf2 = Perceptron(n_iter=2, shuffle=False)
    clf2.fit(X, y_bin)

    assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
    # Perceptron exposes no probability estimates; accessing them must raise.
    clf = Perceptron()
    for meth in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | sklearn/tree/tests/test_export.py | 130 | 9950 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample: 6 points, linearly separable on the first feature; y2 adds a
# second output for the multi-output tests, w down-weights the second class.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
    # Check correctness of export_graphviz: every scenario compares the full
    # generated dot source against a hand-written expected string.
    clf = DecisionTreeClassifier(max_depth=3,
                                 min_samples_split=1,
                                 criterion="gini",
                                 random_state=2)
    clf.fit(X, y)

    # Test export code
    out = StringIO()
    export_graphviz(clf, out_file=out)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test with feature_names
    out = StringIO()
    export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test with class_names
    out = StringIO()
    export_graphviz(clf, out_file=out, class_names=["yes", "no"])
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]\\nclass = yes"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
                'class = yes"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
                'class = no"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test plot_options
    out = StringIO()
    export_graphviz(clf, out_file=out, filled=True, impurity=False,
                    proportion=True, special_characters=True, rounded=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled, rounded", color="black", ' \
                'fontname=helvetica] ;\n' \
                'edge [fontname=helvetica] ;\n' \
                '0 [label=<X<SUB>0</SUB> &le; 0.0<br/>samples = 100.0%<br/>' \
                'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
                '1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
                'fillcolor="#e58139ff"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
                'fillcolor="#399de5ff"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test max_depth
    out = StringIO()
    export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]\\nclass = y[0]"] ;\n' \
                '1 [label="(...)"] ;\n' \
                '0 -> 1 ;\n' \
                '2 [label="(...)"] ;\n' \
                '0 -> 2 ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test max_depth with plot_options
    out = StringIO()
    export_graphviz(clf, out_file=out, max_depth=0, filled=True,
                    node_ids=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled", color="black"] ;\n' \
                '0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
                'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
                '1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
                '0 -> 1 ;\n' \
                '2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
                '0 -> 2 ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test multi-output with weighted samples
    clf = DecisionTreeClassifier(max_depth=2,
                                 min_samples_split=1,
                                 criterion="gini",
                                 random_state=2)
    clf = clf.fit(X, y2, sample_weight=w)

    out = StringIO()
    export_graphviz(clf, out_file=out, filled=True, impurity=False)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled", color="black"] ;\n' \
                '0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
                'value = [[3.0, 1.5, 0.0]\\n' \
                '[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
                '1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
                'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
                'fillcolor="#e5813965"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
                '[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
                '1 -> 2 ;\n' \
                '3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
                '[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
                '1 -> 3 ;\n' \
                '4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
                'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
                'fillcolor="#e5813965"] ;\n' \
                '0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
                '[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
                '4 -> 5 ;\n' \
                '6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
                '[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
                '4 -> 6 ;\n' \
                '}'

    assert_equal(contents1, contents2)

    # Test regression output with plot_options
    clf = DecisionTreeRegressor(max_depth=3,
                                min_samples_split=1,
                                criterion="mse",
                                random_state=2)
    clf.fit(X, y)

    out = StringIO()
    export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
                    rotate=True, rounded=True)
    contents1 = out.getvalue()
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled, rounded", color="black", ' \
                'fontname=helvetica] ;\n' \
                'graph [ranksep=equally, splines=polyline] ;\n' \
                'edge [fontname=helvetica] ;\n' \
                'rankdir=LR ;\n' \
                '0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
                'value = 0.0", fillcolor="#e581397f"] ;\n' \
                '1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
                'fillcolor="#e5813900"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
                'fillcolor="#e58139ff"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="False"] ;\n' \
                '{rank=same ; 0} ;\n' \
                '{rank=same ; 1; 2} ;\n' \
                '}'

    assert_equal(contents1, contents2)
def test_graphviz_errors():
    """export_graphviz must raise IndexError when the supplied
    feature_names / class_names lists are shorter than required."""
    clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
    clf.fit(X, y)

    # Same two checks as before, one fresh output buffer per call.
    for bad_kwargs in ({"feature_names": []}, {"class_names": []}):
        out = StringIO()
        assert_raises(IndexError, export_graphviz, clf, out, **bad_kwargs)
def test_friedman_mse_in_graphviz():
    """The non-default "friedman_mse" criterion must appear in every node
    description of the exported dot output."""
    clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
    clf.fit(X, y)
    dot_data = StringIO()
    export_graphviz(clf, out_file=dot_data)

    # Gradient boosting builds its base regressors with friedman_mse too.
    clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
    clf.fit(X, y)
    for estimator in clf.estimators_:
        export_graphviz(estimator[0], out_file=dot_data)

    # Raw string: "\[" inside a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, SyntaxWarning in 3.12+).
    for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
        assert_in("friedman_mse", finding.group())
| bsd-3-clause |
wmunters/py4sp | plotting/plot_sp.py | 1 | 4504 | import numpy as np
import load_sp as lsp
import matplotlib.pyplot as plt
import os
import windfarm as wf
def set_equal_tight(ax=None):
    """Set an equal aspect ratio and tight autoscaling on *ax*.

    The previous signature used ``ax=plt.gca()``, which is evaluated once at
    import time, so the default always targeted whatever axes happened to be
    current when the module was imported.  Resolving the default at call
    time gives the intended "current axes" behaviour.
    """
    if ax is None:
        ax = plt.gca()
    ax.set_aspect('equal')
    ax.autoscale(tight=True)
def plot_field_turbines(fieldfile='BL_field.dat', key='u', k=16):
    """Show a horizontal slice of field *key* at grid level *k* together
    with the wind-farm turbine locations."""
    snapshot = lsp.load_BLfield_real(fieldfile)
    domain = (0, snapshot['Lx'], 0, snapshot['Ly'])
    slice_xy = snapshot[key][:, :, k]
    plt.figure()
    # imshow expects row-major image data, so transpose and flip the slice.
    plt.imshow(np.flipud(np.transpose(slice_xy)), extent=domain)
    wf.Windfarm().plot_turbines()
    plt.xlabel(r'$x$')
    plt.ylabel(r'$y$')
    plt.xlim(domain[:2])
    plt.ylim(domain[2:])
def plot_bl(bl, kplot=14, iplot=0, jplot=0, key='u', Lz=1):
    """Plot three orthogonal slices (xy, yz, xz) of field *key* in *bl*."""
    cuts = (
        (bl[key][:, :, kplot], (0, bl['Lx'], 0, bl['Ly'])),  # horizontal cut
        (bl[key][iplot, :, :], (0, bl['Ly'], 0, Lz)),        # yz cut
        (bl[key][:, jplot, :], (0, bl['Lx'], 0, Lz)),        # xz cut
    )
    plt.figure()
    for row, (plane, extent) in enumerate(cuts):
        plt.subplot(311 + row)
        plt.imshow(np.flipud(np.transpose(plane)), extent=extent)
        plt.colorbar()
def plot_turbines_topview(filename):
    """Draw each turbine as a vertical line segment of length 2R centred on
    its (x, y) position; columns 0, 1, 4 of *filename* hold x, y, radius."""
    for row in np.loadtxt(filename, skiprows=2):
        xc, yc, radius = row[0], row[1], row[4]
        span = np.array([yc - radius, yc + radius])
        plt.plot((xc, xc), span, 'k', lw=2)
def movie_xy(k, dt, var='u', setuppath='./../', pausetime=0.1, windfarmyaw=True, **kwargs):
    """
    Animate horizontal (xy) slices of *var* at grid level *k*.

    Parameters
    ------------
    k: int
        grid index in zmesh where slices should be plotted
    dt: float
        timestep between snapshots
    var: str, optional
        variable to be plotted, default is 'u'
    setuppath : str, optional
        path where setupfile is located, default is './../'
    pausetime : float, optional
        pause between frames in seconds, default 0.1
    windfarmyaw : bool, optional
        overlay the (possibly yawed) wind-farm turbines, default True
    clim: tuple, optional kwarg
        colorbar limits for movie, default is (0, 25)
    cm: str, optional kwarg
        colormap used in movie, default is 'jet'
        (note: the keyword is ``cm`` — the old docstring wrongly said ``cmap``)
    tstart: float, optional kwarg
        initial time for movie snapshots; required whenever ``tstop`` is given
    tstop: float, optional kwarg
        final time for movie snapshots
    """
    setup = lsp.setup(setuppath)
    # dict.get replaces the original "if key in kwargs: ... else: ..." blocks.
    cl = kwargs.get('clim', (0, 25))
    cmap = kwargs.get('cm', 'jet')
    if windfarmyaw:
        farm = wf.Windfarm(path=setuppath)
    if 'tstop' in kwargs:
        # NOTE(review): 'tstart' is assumed to accompany 'tstop'; a missing
        # 'tstart' raises KeyError here, exactly as in the original code.
        t = np.arange(kwargs['tstart'], kwargs['tstop'], dt)
        print('Making movie for t = ', t)
        for tind, tim in enumerate(t):
            plt.clf()
            print('Plotting t =', tim)
            filename = var + '_zplane_k{:03d}_t_{:4.4f}.dat'.format(k, tim)
            plt.title(tim)
            plot_planexy(filename, show=False, prin=False, clim=cl, cm=cmap)
            if windfarmyaw:
                farm.plot_turbines_yaw(index=tind)
            plt.pause(pausetime)
    else:
        print('Automatic timeloop not yet implemented')
def plot_planexy(filename, Nx=0, Ny=0, show=True, prin=True, **kwargs):
    """
    Plot a horizontal (xy) data plane stored in *filename*.

    Grid dimensions are read from ../NS.setup when that file exists,
    otherwise the *Nx*/*Ny* arguments are used.  Optional kwargs:
    ``cm`` (colormap name, default 'jet') and ``clim`` (colorbar limits).

    NOTE(review): when ../NS.setup is absent, ``setup`` is never assigned
    but is still consulted for the plot extent below, so that path raises
    NameError — confirm whether an extent derived from Nx/Ny is intended.
    """
    if os.path.exists('./../NS.setup'):
        if prin:
            print('Reading grid dimensions from setup file')
        setup = lsp.setup('./../')
        Nxd = setup.Nx2
        Nyd = setup.Ny
        if prin:
            print('Nx = ', Nxd)
            print('Ny = ', Nyd)
    else:
        print('Taking grid dimensions from input parameters')
        Nxd = Nx
        Nyd = Ny
    # data = lsp.load_plane(filename, Nxd, Nyd)
    data = lsp.load_plane_single(filename, Nxd, Nyd)
    # dict.get replaces the original "if 'cm' in kwargs" boilerplate.
    cmap = kwargs.get('cm', 'jet')
    plt.imshow(np.flipud(np.transpose(data)), extent=(0, setup.Lx, 0, setup.Ly),
               cmap=cmap, interpolation='bilinear')
    plt.colorbar()
    if 'clim' in kwargs:
        plt.clim(kwargs['clim'])
    if show:
        plt.show()
def plot_planeyz(filename, Ny, Nz):
    """Placeholder for plotting a yz data plane (not implemented yet)."""
    return 0
def plot_planexz(filename, Nx, Nz):
    """Placeholder for plotting an xz data plane (not implemented yet)."""
    return 0
def make_movie(time_array, N1, N2):
    """Save one horizontal-speed snapshot image per instant in *time_array*."""
    template = '{}_zplane_k013_t_{}.dat'
    for t in time_array:
        plt.clf()
        tstr = "{:6.4f}".format(t)
        print('Loading t = ', tstr)
        u = lsp.load_plane(template.format('u', tstr), N1=N1, N2=N2)
        v = lsp.load_plane(template.format('v', tstr), N1=N1, N2=N2)
        # plot the magnitude of the in-plane velocity
        plt.pcolormesh(np.transpose(np.sqrt(u**2 + v**2)))
        plt.clim((0, 30))
        plt.colorbar()
        plt.savefig('u_' + tstr + '.png')
| gpl-2.0 |
ishanic/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)

# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause

from sklearn.externals.six.moves import zip

import matplotlib.pyplot as plt

from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier

# Three classes separated by nested concentric 10-d Gaussian quantile shells.
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
                               n_classes=3, random_state=1)

# First 3000 samples for training, the remaining 10000 for testing.
n_split = 3000

X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]

# SAMME.R (real) boosting uses class probability estimates ...
bdt_real = AdaBoostClassifier(
    DecisionTreeClassifier(max_depth=2),
    n_estimators=600,
    learning_rate=1)

# ... while SAMME (discrete) boosting uses the classifications only.
bdt_discrete = AdaBoostClassifier(
    DecisionTreeClassifier(max_depth=2),
    n_estimators=600,
    learning_rate=1.5,
    algorithm="SAMME")

bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)

# Test-set error after each boosting iteration, for both algorithms.
# NOTE(review): despite its name, discrete_train_predict holds *test-set*
# predictions (staged_predict is called on X_test).
real_test_errors = []
discrete_test_errors = []

for real_test_predict, discrete_train_predict in zip(
        bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
    real_test_errors.append(
        1. - accuracy_score(real_test_predict, y_test))
    discrete_test_errors.append(
        1. - accuracy_score(discrete_train_predict, y_test))

n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)

# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]

# Left panel: test error vs. number of trees for both algorithms.
plt.figure(figsize=(15, 5))

plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
         discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
         real_test_errors, c='black',
         linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')

# Middle panel: per-tree classification error on the training sample.
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
         "b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
         "r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
          max(real_estimator_errors.max(),
              discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))

# Right panel: per-tree boost weight (SAMME only; SAMME.R weights are all 1).
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
         "b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))

# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
z/xonotic-map-repository | bin/entities_map.py | 1 | 2980 | #!/usr/bin/env python3
# Description: Plots entities on radars
# Author: Tyler "-z-" Mulligan
from matplotlib import pyplot as plt
import numpy as np
import matplotlib as mpl
import matplotlib.font_manager as font_manager
import struct
import sys
import os
from xmr.entities import *
path_entities = 'resources/entities/'
path_radar = 'resources/radars/gfx/'
path_bsp = 'resources/bsp/'
def main():
    """Plot every known entity of a map on top of its radar image.

    Usage: entities_map.py <bsp_name>; writes the composite figure to
    resources/entities_maps/<bsp_name>.png.
    """
    bsp_name = sys.argv[1]
    entities_file = path_entities + bsp_name + '.ent'
    radar_image = path_radar + bsp_name + '_mini.png'
    map_entities = parse_entity_file(entities_file)
    entities_list = entities_mapping.keys()
    props = entity_properties
    these = props.keys()
    # One position list per drawable entity class.
    entities = dict((k, []) for k in these)
    x0, x1, y0, y1 = get_map_boundaries(path_bsp + bsp_name + '/maps/' + bsp_name + '.bsp')
    #aspect = max(abs(x0) + abs(x1), abs(y0) + abs(y1)) / 512
    #longest = max(abs(x0) + abs(x1), abs(y0) + abs(y1)) / 2
    fig, ax = plt.subplots()
    plt.figure(figsize=(6, 6), dpi=96)
    plt.axis('off')
    # Bucket the (x, y) origin of every entity we know how to draw.
    for e in map_entities:
        if 'origin' in e:
            if e['classname'] in entities_list:
                classname = entities_mapping[e['classname']]
                origin = e['origin'].split()
                origin.pop()  # drop the z coordinate; the radar is a top view
                xy = tuple(origin)
                if classname in entities:
                    entities[classname].append(xy)
    plot_entities_list = entities.keys()
    for classname in entities:
        if classname in plot_entities_list:
            if entities[classname]:
                plot_it(entities[classname], props[classname]['symbol'], props[classname]['color'], int(props[classname]['size']))
    # add image
    img = plt.imread(radar_image)
    plt.imshow(img, extent=[x0, x1, y0, y1], origin='upper', aspect='auto')
    # NOTE(review): on recent matplotlib plt.axes() creates a *new* axes;
    # plt.gca() is presumably what was meant here — confirm against the
    # matplotlib version in use.
    plt.axes().set_aspect('equal', 'datalim')
    plt.show()
    fig.tight_layout()
    fig.canvas.draw()
    plt.savefig("resources/entities_maps/" + bsp_name + ".png", transparent=True, dpi=120)
def get_map_boundaries(bsp_file):
    """Return (min_x, max_x, min_y, max_y) of the map's bounding box.

    The 4-byte little-endian int at byte offset 32 of the BSP file holds the
    offset of the lump containing the bounds; the mins/maxs triples of ints
    start 12 bytes into that lump.
    """
    # 'with' guarantees the handle is closed (the original leaked it), and
    # the local is no longer named 'bytes', which shadowed the builtin.
    with open(bsp_file, 'rb') as f:
        f.seek(32)
        (lump_offset,) = struct.unpack('i', f.read(4))
        f.seek(lump_offset + 12)
        min_x, min_y, min_z = struct.unpack('iii', f.read(12))
        max_x, max_y, max_z = struct.unpack('iii', f.read(12))
    return min_x, max_x, min_y, max_y
def plot_it(zipped, symbol, color, scale):
    """Plot entity positions: a scatter for the 'o' symbol, otherwise one
    icon-font glyph per position.

    zipped : list of (x, y) tuples; symbol : marker char or font glyph;
    color : matplotlib color; scale : marker/glyph size.
    """
    x, y = zip(*zipped)
    s = [scale]
    font_name = 'icomoon.ttf'
    path = os.path.join('static', 'css', 'fonts', font_name)
    prop = font_manager.FontProperties(fname=path)
    # '==' instead of 'is': identity comparison against a string literal only
    # worked via CPython interning and is a SyntaxWarning on modern Pythons.
    if symbol == 'o':
        plt.scatter(x, y, s, c=color, alpha=1, marker=symbol)
    else:
        for x0, y0 in zipped:
            plt.text(x0, y0, symbol, fontproperties=prop, size=scale, va='center', ha='center', clip_on=True, color=color)
# Script entry point: expects the map/BSP base name as argv[1].
if __name__ == "__main__":
    main()
| mit |
shivaenigma/electrum | plugins/plot.py | 5 | 3566 | from PyQt4.QtGui import *
from electrum.plugins import BasePlugin, hook
from electrum.i18n import _
import datetime
from electrum.util import format_satoshis
from electrum.bitcoin import COIN
# Matplotlib is optional: the plugin reports itself unavailable when the
# import fails instead of breaking the wallet at startup.
try:
    import matplotlib.pyplot as plt
    import matplotlib.dates as md
    from matplotlib.patches import Ellipse
    from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
    flag_matlib = True
except ImportError:
    # The original bare "except:" also swallowed KeyboardInterrupt and
    # SystemExit; only an import failure should disable the plugin.
    flag_matlib = False
class Plugin(BasePlugin):
    """Preview the wallet history (balance and per-transaction values) as a
    matplotlib figure.  Only available when matplotlib imported cleanly."""

    def is_available(self):
        # Availability is exactly the module-level matplotlib import flag;
        # the original if/else returning True/False was redundant.
        return flag_matlib

    @hook
    def export_history_dialog(self, window, hbox):
        """Add a plot-preview button to the export-history dialog."""
        wallet = window.wallet
        history = wallet.get_history()
        if len(history) > 0:
            b = QPushButton(_("Preview plot"))
            hbox.addWidget(b)
            b.clicked.connect(lambda: self.do_plot(wallet, history))
        else:
            b = QPushButton(_("No history to plot"))
            hbox.addWidget(b)

    def do_plot(self, wallet, history):
        """Plot balance (top) and transaction value (bottom) against time.

        history items are (tx_hash, confirmations, value, timestamp, balance)
        tuples; unconfirmed entries count as pending, entries whose timestamp
        cannot be converted count as unknown.
        """
        balance_Val = []
        value_val = []
        datenums = []
        unknown_trans = 0
        pending_trans = 0
        balance = 0
        for item in history:
            tx_hash, confirmations, value, timestamp, balance = item
            if confirmations:
                if timestamp is not None:
                    try:
                        datenums.append(md.date2num(datetime.datetime.fromtimestamp(timestamp)))
                        balance_Val.append(1000.*balance/COIN)
                    # Must be a tuple: the original used a *list* of
                    # exception classes, which never matches an exception
                    # (and raises TypeError on Python 3), so the handler
                    # was effectively dead.
                    except (RuntimeError, TypeError, NameError):
                        unknown_trans += 1
                else:
                    unknown_trans += 1
            else:
                pending_trans += 1
            value_val.append(1000.*value/COIN)
            if tx_hash:
                # NOTE(review): label is computed but never used afterwards;
                # kept in case wallet.get_label() is relied on for side
                # effects — confirm before removing entirely.
                label, is_default_label = wallet.get_label(tx_hash)
                label = label.encode('utf-8')
            else:
                label = ""
        f, axarr = plt.subplots(2, sharex=True)
        plt.subplots_adjust(bottom=0.2)
        plt.xticks(rotation=25)
        ax = plt.gca()
        # Summary banner above the axes.
        test11 = "Unknown transactions = "+str(unknown_trans)+" Pending transactions = "+str(pending_trans)+" ."
        box1 = TextArea(" Test : Number of pending transactions", textprops=dict(color="k"))
        box1.set_text(test11)
        box = HPacker(children=[box1],
                      align="center",
                      pad=0.1, sep=15)
        anchored_box = AnchoredOffsetbox(loc=3,
                                         child=box, pad=0.5,
                                         frameon=True,
                                         bbox_to_anchor=(0.5, 1.02),
                                         bbox_transform=ax.transAxes,
                                         borderpad=0.5,
                                         )
        ax.add_artist(anchored_box)
        plt.ylabel('mBTC')
        plt.xlabel('Dates')
        xfmt = md.DateFormatter('%Y-%m-%d')
        ax.xaxis.set_major_formatter(xfmt)
        axarr[0].plot(datenums, balance_Val, marker='o', linestyle='-', color='blue', label='Balance')
        axarr[0].legend(loc='upper left')
        axarr[0].set_title('History Transactions')
        xfmt = md.DateFormatter('%Y-%m-%d')
        ax.xaxis.set_major_formatter(xfmt)
        axarr[1].plot(datenums, value_val, marker='o', linestyle='-', color='green', label='Value')
        axarr[1].legend(loc='upper left')
        # plt.annotate('unknown transaction = %d \n pending transactions = %d' %(unknown_trans,pending_trans),xy=(0.7,0.05),xycoords='axes fraction',size=12)
        plt.show()
| gpl-3.0 |
jreback/pandas | pandas/tests/series/test_unary.py | 3 | 1755 | import pytest
from pandas import Series
import pandas._testing as tm
class TestSeriesUnaryOps:
    # __neg__, __pos__, __inv__
    def test_neg(self):
        # makeStringSeries yields a float Series with a string index, so
        # negation must equal multiplication by -1.
        ser = tm.makeStringSeries()
        ser.name = "series"
        tm.assert_series_equal(-ser, -1 * ser)

    def test_invert(self):
        # For a boolean mask, ~mask agrees with negating the comparison.
        ser = tm.makeStringSeries()
        ser.name = "series"
        tm.assert_series_equal(-(ser < 0), ~(ser < 0))

    @pytest.mark.parametrize(
        "source, target",
        [
            ([1, 2, 3], [-1, -2, -3]),
            ([1, 2, None], [-1, -2, None]),
            ([-1, 0, 1], [1, 0, -1]),
        ],
    )
    def test_unary_minus_nullable_int(
        self, any_signed_nullable_int_dtype, source, target
    ):
        # Unary minus must preserve the nullable dtype and propagate NA.
        dtype = any_signed_nullable_int_dtype
        ser = Series(source, dtype=dtype)
        result = -ser
        expected = Series(target, dtype=dtype)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]])
    def test_unary_plus_nullable_int(self, any_signed_nullable_int_dtype, source):
        # Unary plus is a no-op: same dtype, same values, same NA positions.
        dtype = any_signed_nullable_int_dtype
        expected = Series(source, dtype=dtype)
        result = +expected
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "source, target",
        [
            ([1, 2, 3], [1, 2, 3]),
            ([1, -2, None], [1, 2, None]),
            ([-1, 0, 1], [1, 0, 1]),
        ],
    )
    def test_abs_nullable_int(self, any_signed_nullable_int_dtype, source, target):
        # abs() keeps the nullable dtype, flips negatives, propagates NA.
        dtype = any_signed_nullable_int_dtype
        ser = Series(source, dtype=dtype)
        result = abs(ser)
        expected = Series(target, dtype=dtype)
        tm.assert_series_equal(result, expected)
| bsd-3-clause |
ajylee/gpaw-rtxs | gpaw/testing/old_molecule_test.py | 1 | 6841 | # -*- coding: utf-8 -*-
import sys
import pickle
import traceback
import os.path as path
from ase.data.g2_1 import data
from ase.structure import molecule
from ase.data.molecules import latex
from ase.atoms import string2symbols
from ase.parallel import paropen
from ase.parallel import rank, barrier
from ase.io.trajectory import PickleTrajectory
from ase.units import kcal, mol
import numpy as np
try:
import matplotlib
matplotlib.use('Agg')
import pylab as plt
except ImportError:
pass
from gpaw import GPAW, restart, ConvergenceError
from gpaw.testing.atomization_data import atomization_vasp, diatomic
# Build the benchmark system list: every molecule with VASP atomization
# reference data plus each atomic species those molecules contain.
# (Python 2 code: dict.keys() returns a list here.)
dimers = diatomic.keys()
# NOTE(review): FH is excluded from the dimer set — presumably its
# reference data is unusable here; confirm before relying on this list.
dimers.remove('FH')
molecules = atomization_vasp.keys()
atoms = set()
for m in molecules:
    atoms.update(molecule(m).get_chemical_symbols())
systems = molecules + list(atoms)
def atomization_energies(E):
    """Write given atomization energies to file atomization_energies.csv.

    E maps chemical formulas/atomic symbols to total energies.  Returns a
    dict mapping each formula to (calculated, VASP reference) atomization
    energy in eV.  Formulas whose energies are missing from E are skipped.
    """
    Ea = {}
    # 'with' guarantees the csv file is flushed and closed on every exit
    # path (the original left the handle open).
    with open('atomization_energies.csv', 'w') as fd:
        for formula in sorted(molecules):
            try:
                ea = -E[formula]
                for a in string2symbols(data[formula]['symbols']):
                    ea += E[a]
                eavasp = atomization_vasp[formula][1] * kcal / mol
                Ea[formula] = (ea, eavasp)
                name = latex(data[formula]['name'])
                fd.write('`%s`, %.3f, %.3f, %+.3f\n' %
                         (name[1:-1], ea, eavasp, ea - eavasp))
            except KeyError:
                pass  # Happens if required formula or atoms are not in E
    return Ea
def bondlengths(Ea, dE):
    """Calculate bond lengths and write to bondlengths.csv file.

    Ea maps formulas to (calculated, reference) atomization energies; dE is
    a list of (formula, energies) pairs sampled on a 5-point grid around the
    reference bond length.  Also draws the energy curves to bondlengths.png.

    NOTE(review): the csv handle is never closed explicitly here; rely on
    interpreter exit or wrap in 'with' when modernising.
    """
    B = []
    E0 = []
    csv = open('bondlengths.csv', 'w')
    for formula, energies in dE:
        bref = diatomic[formula][1]
        # Cubic fit through the 5 sampled bond lengths (±4% around bref).
        b = np.linspace(0.96 * bref, 1.04 * bref, 5)
        e = np.polyfit(b, energies, 3)
        if not formula in Ea:
            continue
        ea, eavasp = Ea[formula]
        # Equilibrium length = middle root of the derivative of the cubic.
        dedb = np.polyder(e, 1)
        b0 = np.roots(dedb)[1]
        assert abs(b0 - bref) < 0.1
        b = np.linspace(0.96 * bref, 1.04 * bref, 20)
        e = np.polyval(e, b) - ea
        if formula == 'O2':
            plt.plot(b, e, '-', color='0.7', label='GPAW')
        else:
            plt.plot(b, e, '-', color='0.7', label='_nolegend_')
        name = latex(data[formula]['name'])
        plt.text(b[0], e[0] + 0.2, name)
        B.append(bref)
        E0.append(-eavasp)
        csv.write('`%s`, %.3f, %.3f, %+.3f\n' %
                  (name[1:-1], b0, bref, b0 - bref))
    plt.plot(B, E0, 'g.', label='reference')
    plt.legend(loc='lower right')
    plt.xlabel(u'Bond length [Å]')
    plt.ylabel('Energy [eV]')
    plt.savefig('bondlengths.png')
def read_and_check_results():
    """Read energies from .gpw files.

    Prints Python-literal 'E = {...}' and 'dE = [...]' blocks to stdout so
    the output can be pasted back into a script, and returns (E, dE) where
    E maps formula -> energy and dE lists (formula, energies) pairs read
    from the dimer trajectories.  Sanity-checks occupation numbers and
    (for spin-polarised runs) the total magnetic moment along the way.
    """
    fd = sys.stdout
    E = {}
    fd.write('E = {')
    for formula in systems:
        try:
            atoms, calc = restart(formula, txt=None)
        except (KeyError, IOError):
            #print formula
            continue
        nspins = calc.get_number_of_spins()
        # Occupation numbers must be (near-)integers.
        fa = calc.get_occupation_numbers(spin=0)
        assert ((fa.round() - fa)**2).sum() < 1e-14
        if nspins == 2:
            fb = calc.get_occupation_numbers(spin=1)
            assert ((fb.round() - fb)**2).sum() < 1e-9
            if len(atoms) == 1:
                M = data[formula]['magmom']
            else:
                M = sum(data[formula]['magmoms'])
            # Spin-up minus spin-down occupation equals the magnetic moment.
            assert abs((fa-fb).sum() - M) < 1e-9
        e = calc.get_potential_energy()
        fd.write("'%s': %.3f, " % (formula, e))
        fd.flush()
        E[formula] = e
    dE = [] # or maybe {} ?
    fd.write('}\ndE = [')
    for formula in dimers:
        try:
            trajectory = PickleTrajectory(formula + '.traj', 'r')
        except IOError:
            continue
        energies = [a.get_potential_energy() for a in trajectory]
        dE.append((formula, (energies)))
        fd.write("('%s', (" % formula)
        # Energies are reported relative to the equilibrium-geometry energy.
        fd.write(', '.join(['%.4f' % (energy - E[formula])
                            for energy in energies]))
        fd.write(')),\n ')
    fd.write(']\n')
    return E, dE
class Test:
    """Driver for the GPAW molecule benchmark (Python 2 code).

    Holds the common calculation parameters and runs one GPAW calculation
    per formula/atom, writing each result to <formula>.gpw and, for
    diatomics, a 5-point bond-length scan to <formula>.traj.
    """

    def __init__(self, vacuum=6.0, h=0.16, xc='PBE', setups='paw',
                 eigensolver='rmm-diis', basis=None,
                 calculate_dimer_bond_lengths=True, txt=sys.stdout):
        # vacuum: padding around the molecule in Angstrom; h: grid spacing.
        self.vacuum = vacuum
        self.h = h
        self.xc = xc
        self.setups = setups
        self.eigensolver = eigensolver
        if basis is None:
            basis = {}
        self.basis = basis
        self.calculate_dimer_bond_lengths = calculate_dimer_bond_lengths
        # txt may be a stream or a base filename for the log.
        if isinstance(txt, str):
            txt = open(txt + '.log', 'w')
        self.txt = txt

    def do_calculations(self, formulas):
        """Perform calculation on molecules, write results to .gpw files."""
        # Also compute every atomic species occurring in the formulas.
        atoms = {}
        for formula in formulas:
            for symbol in string2symbols(formula.split('_')[0]):
                atoms[symbol] = None
        formulas = formulas + atoms.keys()

        for formula in formulas:
            if path.isfile(formula + '.gpw'):
                continue
            barrier()
            # NOTE(review): presumably creates an empty placeholder so other
            # MPI ranks skip this formula — confirm; the handle is dropped
            # immediately.
            open(formula + '.gpw', 'w')
            s = molecule(formula)
            s.center(vacuum=self.vacuum)
            cell = s.get_cell()
            h = self.h
            # Snap the cell to a multiple of 4h so the grid divides evenly.
            s.set_cell((cell / (4 * h)).round() * 4 * h)
            s.center()
            calc = GPAW(h=h,
                        xc=self.xc,
                        eigensolver=self.eigensolver,
                        setups=self.setups,
                        basis=self.basis,
                        fixmom=True,
                        txt=formula + '.txt')

            if len(s) == 1:
                # Single atoms: enforce Hund's-rule occupation.
                calc.set(hund=True)

            s.set_calculator(calc)

            if formula == 'BeH':
                # Hand-tuned initial occupations for the troublesome BeH case.
                calc.initialize(s)
                calc.nuclei[0].f_si = [(1, 0, 0.5, 0, 0),
                                       (0.5, 0, 0, 0, 0)]

            if formula in ['NO', 'ClO', 'CH']:
                # Break symmetry slightly to help convergence.
                s.positions[:, 1] += h * 1.5

            try:
                energy = s.get_potential_energy()
            except (RuntimeError, ConvergenceError):
                if rank == 0:
                    print >> sys.stderr, 'Error in', formula
                    traceback.print_exc(file=sys.stderr)
            else:
                print >> self.txt, formula, repr(energy)
                self.txt.flush()
                calc.write(formula)

                if formula in diatomic and self.calculate_dimer_bond_lengths:
                    # 5-point bond-length scan at ±4% around the reference.
                    traj = PickleTrajectory(formula + '.traj', 'w')
                    d = diatomic[formula][1]
                    for x in range(-2, 3):
                        s.set_distance(0, 1, d * (1.0 + x * 0.02))
                        traj.write(s)
jls713/jfactors | flattened/sampler.py | 1 | 9246 | ## Generate samples from triaxiality distributions for Figures 9 & 10 and Table 5 of Sanders, Evans & Geringer-Sameth
## ============================================================================
import numpy as np
from numpy import sqrt,cos,sin
import emcee
# import corner
import sys
sys.path.append('/home/jls/work/code/jfactors/')
import jfactors_py as jf
import pandas as pd
## ============================================================================
def observed_ellipticity(ba, ca, theta, phi):
    """Return the observed axis ratio (i.e. 1 - ellipticity) of a triaxial
    body with intrinsic ratios (b/a, c/a) viewed along the line of sight
    given by spherical polars (theta, phi).

    Follows Contopoulos 1956 / Weijmans et al. 2014, Appendix.
    """
    if ba == 1. and ca == 1.:
        return 1.  # a sphere projects to a circle from any direction
    st, ct = sin(theta), cos(theta)
    sp = sin(phi)
    ba2 = ba * ba
    ca2 = ca * ca
    fb = 1. - ba2  # "flattening" factor 1 - (b/a)^2
    fc = 1. - ca2  # "flattening" factor 1 - (c/a)^2
    A = fc * ct * ct + fb * st * st * sp * sp + ba2 + ca2
    B = fc * ct * ct - fb * st * st * sp * sp - ba2 + ca2
    B = sqrt(B * B + 4. * fc * fb * st * st * ct * ct * sp * sp)
    return sqrt((A - B) / (A + B))
def ba_from_Tca(T, ca):
    """Return b/a given triaxiality T and axis ratio c/a
    (from T = (1 - (b/a)^2) / (1 - (c/a)^2))."""
    return sqrt(1. - T * (1. - ca * ca))
def axis_ratios(T, E):
    """Return (b/a, c/a) given triaxiality T and intrinsic ellipticity
    E = 1 - c/a."""
    ca = 1. - E
    return ba_from_Tca(T, ca), ca
## ============================================================================
## Priors
def logp(x):
    """Log-prior for x = (T, E, theta, phi): flat on [0,1]^2 for (T, E) and
    uniform over the sphere octant, hence the sin(theta) Jacobian factor.
    Returns -inf outside the support."""
    T, E, theta, phi = x[0], x[1], x[2], x[3]
    in_support = (0. <= T <= 1.
                  and 0. <= E <= 1.
                  and 0. <= theta <= .5 * np.pi
                  and 0. <= phi <= .5 * np.pi)
    return np.log(np.sin(theta)) if in_support else -np.inf
def major_axis_prior(theta, phi):
    """Gaussian log-prior keeping the major axis within ~0.1 rad of the
    line of sight (peaks at theta = pi/2, phi = 0)."""
    width = 0.1  # same width (radians) for both angular deviations
    return -phi**2 / 2. / width**2 - (theta - .5 * np.pi)**2 / 2. / width**2
def sanchez_janssen_prior(T, E):
    """Gaussian log-priors from Sanchez-Janssen et al. (2016):
    triaxiality T ~ N(0.55, 0.04) and ellipticity E ~ N(0.51, 0.12)."""
    T0, sigma_T = 0.55, 0.04
    E0, sigma_E = 0.51, 0.12
    return -(T - T0)**2 / 2. / sigma_T**2 - (E - E0)**2 / 2. / sigma_E**2
## ============================================================================
## Likelihood
def logl(x, e_mean, e_err, ma_prior, sj_prior):
    """Log-posterior of the observed ellipticity for x = (T, E, theta, phi).

    The observed ellipticity is modelled as Gaussian with mean e_mean and
    s.d. e_err; a two-element list e_err gives asymmetric [upper, lower]
    error bars.  ma_prior / sj_prior toggle the major-axis and
    Sanchez-Janssen priors respectively.
    """
    lp = logp(x)
    if lp == -np.inf:
        return lp
    ba, ca = axis_ratios(x[0], x[1])
    # Hard prior: reject implausibly flat shapes.
    if ca < 0.05:
        return -np.inf
    if ma_prior:
        lp += major_axis_prior(x[2], x[3])
    if sj_prior:
        lp += sanchez_janssen_prior(x[0], x[1])
    oe = observed_ellipticity(ba, ca, x[2], x[3])

    def gaussian_term(sigma):
        return lp - (e_mean - oe)**2 / 2. / sigma**2 \
            - .5 * np.log(2. * np.pi * sigma**2)

    if isinstance(e_err, list):
        if oe > e_mean:
            return gaussian_term(e_err[0])
        if oe < e_mean:
            return gaussian_term(e_err[1])
        # NOTE(review): oe == e_mean with list-valued e_err falls through
        # to the scalar branch below and raises TypeError, exactly as in
        # the original implementation.
    return gaussian_term(e_err)
## ============================================================================
## Main function
def compute_samples(nwalkers, e_m, e_s, ma_prior=False, sj_prior=False, withplots=None, nsteps=5000):
    """Generates a set of samples of (T, E, theta, phi) that reproduce the
    observed-ellipticity distribution (mean e_m = 1 - epsilon, error e_s).

    ma_prior / sj_prior toggle the extra priors; withplots, if truthy, is
    the filename for a corner plot.  Returns the flattened emcee chain of
    shape (nwalkers * nsteps, 4).  (Python 2 code: print statements below.)

    NOTE(review): the corner-plot branch needs the 'corner' package, whose
    import is commented out at the top of the file — calling with
    withplots set presumably raises NameError; confirm.
    """
    ndim = 4
    ## Construct random initial samples uniformly distributed between lo and hi
    lo = [0., 0., 0., 0.]
    hi = [1., 1., .5*np.pi, .5*np.pi]
    p0 = np.array([np.random.uniform(low=lo[k], high=hi[k], size=nwalkers) for k in range(ndim)]).T
    sampler = emcee.EnsembleSampler(nwalkers, ndim, logl,
                                    args=[e_m, e_s, ma_prior, sj_prior])
    ## We do a burn-in of 3 n_steps
    pos, prob, state = sampler.run_mcmc(p0, 3*nsteps)
    sampler.reset()
    ## Now production run
    pos, prob, state = sampler.run_mcmc(pos, nsteps)
    samples = sampler.chain.reshape((-1, ndim))
    if(withplots):
        fig = corner.corner(samples, labels=[r'$T$', r'$E$', r'$\theta$', r'$\phi$'])
        fig.savefig(withplots)
    # Diagnostics: final walker statistics and acceptance fraction.
    print np.median(pos, axis=0), np.std(pos, axis=0)
    print np.median(sampler.acceptance_fraction)
    return samples
def samples(e_m, e_s, size, ffile, ma_prior=False, sj_prior=False, geo_factor=True, withplots=False):
    """Generates a set of <size> samples of (T, E, theta, phi) that produce
    the distribution of observed ellipticity for a dwarf, computes the
    J-factor correction for each sample and writes the results to *ffile*.

    ma_prior is a flag for the Major-Axis prior; sj_prior for the
    Sanchez-Janssen prior; geo_factor uses an additional sqrt(1-e) factor
    in the spherical comparison model.  (Python 2 code: print below.)
    """
    ### 1. Compute samples from emcee
    nwalkers = 50
    ellip = 1.-e_m ## We work with (1-\epsilon) as observed_ellipticity computes 1-\epsilon
    ## The errors can either be a list (upper, lower errorbar) or a single value
    ellip_errors = e_s
    if(isinstance(e_s, list)):
        # Swap to the [lower, upper] order expected by logl.
        ellip_errors = [e_s[1], e_s[0]]
    ## If e_s[0] or e_s = nan then e_m is a 90% upper limit and we use
    ## a Gaussian centered on ellip=1 with width e_m/2
    ## (x != x is the NaN test).
    if(isinstance(e_s, list)):
        if(e_s[0] != e_s[0]):
            ellip_errors = [0., e_m/2.]
            ellip = 1.
    else:
        if(e_s != e_s):
            ellip_errors = e_m/2.
            ellip = 1.
    samples = compute_samples(nwalkers, ellip, ellip_errors, ma_prior, sj_prior, withplots)
    ### 2. Take <size> samples and compute correction factors
    samples_redux = samples[np.random.randint(len(samples), size=size)]
    # Fiducial model parameters: half-light radius, velocity dispersion,
    # distance and integration angle.
    rh = 0.05
    sig = 3.22
    Dist = 30.
    ang = 0.5
    with_multipole = True
    # Near-spherical comparison shape and near-axis viewing direction.
    sph_shape = np.array([0.999, 0.99])
    ssp = jf.PaperModel(sph_shape[0], sph_shape[1], rh, sig, with_multipole)
    print_messages = False
    withD = False
    sph_viewing = np.array([0.01, 0.01])
    Jt = ssp.J_factor(sph_viewing[0], sph_viewing[1], Dist, ang, print_messages, withD, -1.)[0]
    ff = open(ffile, 'w')
    for k, i in enumerate(samples_redux):
        ba, ca = axis_ratios(i[0], i[1])
        oe = observed_ellipticity(ba, ca, i[2], i[3])
        print i, ba, ca, oe
        ss = jf.PaperModel(ba, ca, rh, sig, with_multipole)
        if(geo_factor):
            # Rescale the spherical model's radius by sqrt(1 - e).
            ssp2 = jf.PaperModel(sph_shape[0], sph_shape[1], rh*np.sqrt(ss.ellipticity(i[2], i[3])), sig, with_multipole)
            Jt = ssp2.J_factor(sph_viewing[0], sph_viewing[1], Dist, ang, print_messages, withD, -1.)[0]
        # Correction factor: log10 of triaxial-to-spherical J-factor ratio.
        rr = np.log10(ss.J_factor(i[2], i[3], Dist, ang, print_messages, withD, -1.)[0]/Jt)
        ll = logl(i, ellip, ellip_errors, ma_prior, sj_prior)
        ff.write('%0.4f %0.4f %0.4f %0.4f %0.4f %0.4f %0.4f %0.4f %0.4f\n' % (i[0], i[1], i[2], i[3], rr, oe, ba, ca, ll))
    ff.close()
## ============================================================================
def run_grid(geo_factor=True):
    ''' For each dwarf compute sample of correction factors under the
    three assumptions (no prior, major-axis prior, Sanchez-Janssen prior)
    and output to one file per dwarf/prior in triaxial_results/ '''
    data = pd.read_csv('../data/data.dat', sep=' ')
    N = 500
    # NOTE(review): the loop starts at index 19, not 0 — presumably a
    # leftover restart index from a partial run; confirm before reuse.
    for i in range(19, len(data)):
        samples(data.ellip[i], [data.ellip_e1[i], data.ellip_e2[i]], N,
                'triaxial_results/'+data.Name[i]+'_nop', False, False,
                geo_factor=geo_factor, withplots=None)#'tmp.png')
        samples(data.ellip[i], [data.ellip_e1[i], data.ellip_e2[i]], N,
                'triaxial_results/'+data.Name[i]+'_ma', True, False,
                geo_factor=geo_factor, withplots=None)#'tmp.png')
        samples(data.ellip[i], [data.ellip_e1[i], data.ellip_e2[i]], N,
                'triaxial_results/'+data.Name[i]+'_sj', False, True,
                geo_factor=geo_factor, withplots=None)#'tmp.png')
def ret2(geo_factor=True):
    ''' For RetII compute sample of correction factors under the
    three assumptions and output to file (higher res than run_grid).
    Row 21 of ../data/data.dat is assumed to be Reticulum II. '''
    data = pd.read_csv('../data/data.dat', sep=' ')
    i = 21
    samples(data.ellip[i], [data.ellip_e1[i], data.ellip_e2[i]], 400,
            'triaxial_results/'+data.Name[i]+'_nop_hr', False, False,
            geo_factor=geo_factor, withplots=None)#'ret2_dist.png')
    samples(data.ellip[i], [data.ellip_e1[i], data.ellip_e2[i]], 400,
            'triaxial_results/'+data.Name[i]+'_ma_hr', True, False,
            geo_factor=geo_factor, withplots=None)#'ret2_dist.png')
    samples(data.ellip[i], [data.ellip_e1[i], data.ellip_e2[i]], 400,
            'triaxial_results/'+data.Name[i]+'_sj_hr', False, True,
            geo_factor=geo_factor, withplots=None)#'ret2_dist.png')
## ============================================================================
# Script entry point: run the full grid (RetII high-res run is optional).
if __name__ == "__main__":
    run_grid()
    # ret2()
## ============================================================================
| mit |
cerrno/neurokernel | examples/testLPU/visualize_testLPU.py | 1 | 1818 | #@author: Amol Kapoor
#date: 3-13-15
#Visualizer for simpleLPU stuff
# Renders the test-LPU input and output signals to simple_output.png
# using the neurokernel visualizer (non-interactive 'agg' backend).
import matplotlib as mpl
mpl.use('agg')
import neurokernel.LPU.utils.visualizer as vis
import networkx as nx

# Temporary fix for bug in networkx 1.8:
nx.readwrite.gexf.GEXF.convert_bool = {'false':False, 'False':False,
                                       'true':True, 'True':True}

# start up the visualizer
V = vis.visualizer()

# take in the input file as defined in data, and plot it
V.add_LPU('./data/simple_input.h5', LPU='Input')
V.add_plot({'type': 'waveform', 'ids': [[0]]}, 'input_Input')

# take in the spike data/potential from the neuron output and plot it
V.add_LPU('simple_output_spike.h5',
          './data/simple_lpu.gexf.gz', 'Simple LPU (Spikes)')
# the [0,1] under ids should print both hh and leaky
V.add_plot({'type':'raster', 'ids': {0: [0]},
            'yticks': [0], 'yticklabels': [0]},
           'Simple LPU (Spikes)','Output')

V.add_LPU('simple_output_gpot.h5',
          './data/simple_lpu.gexf.gz', 'Simple LPU (Graded Potential)')
# one waveform plot per graded-potential neuron (ids 0, 1, 2)
V.add_plot({'type': 'waveform', 'ids': {0:[0]}},
           'Simple LPU (Graded Potential)', 'Output')
V.add_plot({'type': 'waveform', 'ids': {0:[1]}},
           'Simple LPU (Graded Potential)', 'Output')
V.add_plot({'type': 'waveform', 'ids': {0:[2]}},
           'Simple LPU (Graded Potential)', 'Output')

# plot configuration
# how often it updates
V._update_interval = 50
# rows and columns of the plot grid
V.rows = 5
V.cols = 1
# font size
V.fontsize = 10
#V.out_filename = 'simple_output.avi'
#V.codec = 'libtheora'
# time step
V.dt = 0.0001
# axis limits (time in s, potential in mV)
V.xlim = [0, 1.0]
V.ylim = [-70.0, 10.0]
# figure size
V.figsize = (16, 9)
V.title = "Simple LPU Testing RK4 Models"

# run the visualizer for 120 frames
V.run('simple_output.png', 120)
| bsd-3-clause |
datapythonista/pandas | pandas/core/arrays/floating.py | 3 | 13304 | from __future__ import annotations
import warnings
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._typing import (
ArrayLike,
DtypeObj,
)
from pandas.compat.numpy import function as nv
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
is_bool_dtype,
is_datetime64_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
ExtensionDtype,
register_extension_dtype,
)
from pandas.core.dtypes.missing import isna
from pandas.core.arrays.numeric import (
NumericArray,
NumericDtype,
)
from pandas.core.ops import invalid_comparison
from pandas.core.tools.numeric import to_numeric
class FloatingDtype(NumericDtype):
    """
    An ExtensionDtype to hold a single size of floating dtype.

    These specific implementations are subclasses of the non-public
    FloatingDtype. For example we have Float32Dtype to represent float32.

    The attributes name & type are set when these subclasses are created.
    """

    def __repr__(self) -> str:
        # e.g. "Float64Dtype()"
        return f"{self.name}Dtype()"

    @property
    def _is_numeric(self) -> bool:
        return True

    @classmethod
    def construct_array_type(cls) -> type[FloatingArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        return FloatingArray

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        """Return the common FloatingDtype for *dtypes*, or None when any
        member is not a FloatingDtype (the caller then falls back to the
        generic resolution path)."""
        # for now only handle other floating types
        if not all(isinstance(t, FloatingDtype) for t in dtypes):
            return None
        np_dtype = np.find_common_type(
            # error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype]" has no
            # attribute "numpy_dtype"
            [t.numpy_dtype for t in dtypes],  # type: ignore[union-attr]
            [],
        )
        # Only map back to a masked dtype when the common kind is floating.
        if np.issubdtype(np_dtype, np.floating):
            return FLOAT_STR_TO_DTYPE[str(np_dtype)]
        return None
def coerce_to_array(
    values, dtype=None, mask=None, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
    """
    Coerce the input values array to numpy arrays with a mask.

    Parameters
    ----------
    values : 1D list-like
    dtype : float dtype
    mask : bool 1D array, optional
    copy : bool, default False
        if True, copy the input

    Returns
    -------
    tuple of (values, mask)
    """
    # if values is floating numpy array, preserve its dtype
    if dtype is None and hasattr(values, "dtype"):
        if is_float_dtype(values.dtype):
            dtype = values.dtype
    # Normalize dtype to a FloatingDtype instance (accepts "Float64" strings,
    # numpy dtypes, etc.); invalid specifications raise ValueError.
    if dtype is not None:
        if isinstance(dtype, str) and dtype.startswith("Float"):
            # Avoid DeprecationWarning from NumPy about np.dtype("Float64")
            # https://github.com/numpy/numpy/pull/7476
            dtype = dtype.lower()
        if not issubclass(type(dtype), FloatingDtype):
            try:
                dtype = FLOAT_STR_TO_DTYPE[str(np.dtype(dtype))]
            except KeyError as err:
                raise ValueError(f"invalid dtype specified {dtype}") from err
    # Fast path: an existing FloatingArray already carries a data/mask pair.
    if isinstance(values, FloatingArray):
        values, mask = values._data, values._mask
        if dtype is not None:
            values = values.astype(dtype.numpy_dtype, copy=False)
        if copy:
            values = values.copy()
            mask = mask.copy()
        return values, mask
    values = np.array(values, copy=copy)
    if is_object_dtype(values):
        inferred_type = lib.infer_dtype(values, skipna=True)
        if inferred_type == "empty":
            values = np.empty(len(values))
            values.fill(np.nan)
        elif inferred_type not in [
            "floating",
            "integer",
            "mixed-integer",
            "integer-na",
            "mixed-integer-float",
        ]:
            raise TypeError(f"{values.dtype} cannot be converted to a FloatingDtype")
    elif is_bool_dtype(values) and is_float_dtype(dtype):
        values = np.array(values, dtype=float, copy=copy)
    elif not (is_integer_dtype(values) or is_float_dtype(values)):
        raise TypeError(f"{values.dtype} cannot be converted to a FloatingDtype")
    # Derive the mask from NA values when not supplied by the caller.
    if mask is None:
        mask = isna(values)
    else:
        assert len(mask) == len(values)
    if not values.ndim == 1:
        raise TypeError("values must be a 1D list-like")
    if not mask.ndim == 1:
        raise TypeError("mask must be a 1D list-like")
    # infer dtype if needed
    if dtype is None:
        dtype = np.dtype("float64")
    else:
        dtype = dtype.type
    # if we are float, let's make sure that we can
    # safely cast
    # we copy as need to coerce here
    # TODO should this be a safe cast?
    if mask.any():
        # Masked positions are backed by NaN in the data array.
        values = values.copy()
        values[mask] = np.nan
        values = values.astype(dtype, copy=False)  # , casting="safe")
    else:
        values = values.astype(dtype, copy=False)  # , casting="safe")
    return values, mask
class FloatingArray(NumericArray):
    """
    Array of floating (optional missing) values.

    .. versionadded:: 1.2.0

    .. warning::

       FloatingArray is currently experimental, and its API or internal
       implementation may change without warning. Especially the behaviour
       regarding NaN (distinct from NA missing values) is subject to change.

    We represent a FloatingArray with 2 numpy arrays:

    - data: contains a numpy float array of the appropriate dtype
    - mask: a boolean array holding a mask on the data, True is missing

    To construct an FloatingArray from generic array-like input, use
    :func:`pandas.array` with one of the float dtypes (see examples).

    See :ref:`integer_na` for more.

    Parameters
    ----------
    values : numpy.ndarray
        A 1-d float-dtype array.
    mask : numpy.ndarray
        A 1-d boolean-dtype array indicating missing values.
    copy : bool, default False
        Whether to copy the `values` and `mask`.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Returns
    -------
    FloatingArray

    Examples
    --------
    Create an FloatingArray with :func:`pandas.array`:

    >>> pd.array([0.1, None, 0.3], dtype=pd.Float32Dtype())
    <FloatingArray>
    [0.1, <NA>, 0.3]
    Length: 3, dtype: Float32

    String aliases for the dtypes are also available. They are capitalized.

    >>> pd.array([0.1, None, 0.3], dtype="Float32")
    <FloatingArray>
    [0.1, <NA>, 0.3]
    Length: 3, dtype: Float32
    """
    # The value used to fill '_data' to avoid upcasting
    _internal_fill_value = 0.0
    @cache_readonly
    def dtype(self) -> FloatingDtype:
        # Map the backing numpy dtype to the singleton masked dtype instance.
        return FLOAT_STR_TO_DTYPE[str(self._data.dtype)]
    def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
        # Only float-kind ("f") numpy arrays are accepted; anything else
        # should go through pd.array, which coerces first.
        if not (isinstance(values, np.ndarray) and values.dtype.kind == "f"):
            raise TypeError(
                "values should be floating numpy array. Use "
                "the 'pd.array' function instead"
            )
        super().__init__(values, mask, copy=copy)
    @classmethod
    def _from_sequence(
        cls, scalars, *, dtype=None, copy: bool = False
    ) -> FloatingArray:
        # Construct from an arbitrary 1D sequence via coerce_to_array.
        values, mask = coerce_to_array(scalars, dtype=dtype, copy=copy)
        return FloatingArray(values, mask)
    @classmethod
    def _from_sequence_of_strings(
        cls, strings, *, dtype=None, copy: bool = False
    ) -> FloatingArray:
        # Parse strings to numbers first (raising on unparsable input),
        # then reuse the regular sequence constructor.
        scalars = to_numeric(strings, errors="raise")
        return cls._from_sequence(scalars, dtype=dtype, copy=copy)
    def _coerce_to_array(self, value) -> tuple[np.ndarray, np.ndarray]:
        # Coerce `value` to (data, mask) using this array's own dtype.
        return coerce_to_array(value, dtype=self.dtype)
    def astype(self, dtype, copy: bool = True) -> ArrayLike:
        """
        Cast to a NumPy array or ExtensionArray with 'dtype'.

        Parameters
        ----------
        dtype : str or dtype
            Typecode or data-type to which the array is cast.
        copy : bool, default True
            Whether to copy the data, even if not necessary. If False,
            a copy is made only if the old dtype does not match the
            new dtype.

        Returns
        -------
        ndarray or ExtensionArray
            NumPy ndarray, or BooleanArray, IntegerArray or FloatingArray with
            'dtype' for its dtype.

        Raises
        ------
        TypeError
            if incompatible type with an FloatingDtype, equivalent of same_kind
            casting
        """
        dtype = pandas_dtype(dtype)
        if isinstance(dtype, ExtensionDtype):
            return super().astype(dtype, copy=copy)
        # coerce
        if is_float_dtype(dtype):
            # In astype, we consider dtype=float to also mean na_value=np.nan
            kwargs = {"na_value": np.nan}
        elif is_datetime64_dtype(dtype):
            # error: Dict entry 0 has incompatible type "str": "datetime64"; expected
            # "str": "float"
            kwargs = {"na_value": np.datetime64("NaT")}  # type: ignore[dict-item]
        else:
            kwargs = {}
        # error: Argument 2 to "to_numpy" of "BaseMaskedArray" has incompatible
        # type "**Dict[str, float]"; expected "bool"
        data = self.to_numpy(dtype=dtype, **kwargs)  # type: ignore[arg-type]
        return astype_nansafe(data, dtype, copy=False)
    def _values_for_argsort(self) -> np.ndarray:
        # Sorting operates on the raw float data (masked entries are NaN).
        return self._data
    def _cmp_method(self, other, op):
        # Elementwise comparison returning a BooleanArray with NA propagation.
        from pandas.arrays import (
            BooleanArray,
            IntegerArray,
        )
        mask = None
        if isinstance(other, (BooleanArray, IntegerArray, FloatingArray)):
            # Unwrap masked arrays to compare raw data and combine masks below.
            other, mask = other._data, other._mask
        elif is_list_like(other):
            other = np.asarray(other)
            if other.ndim > 1:
                raise NotImplementedError("can only perform ops with 1-d structures")
        if other is libmissing.NA:
            # numpy does not handle pd.NA well as "other" scalar (it returns
            # a scalar False instead of an array)
            # This may be fixed by NA.__array_ufunc__. Revisit this check
            # once that's implemented.
            result = np.zeros(self._data.shape, dtype="bool")
            mask = np.ones(self._data.shape, dtype="bool")
        else:
            with warnings.catch_warnings():
                # numpy may show a FutureWarning:
                # elementwise comparison failed; returning scalar instead,
                # but in the future will perform elementwise comparison
                # before returning NotImplemented. We fall back to the correct
                # behavior today, so that should be fine to ignore.
                warnings.filterwarnings("ignore", "elementwise", FutureWarning)
                with np.errstate(all="ignore"):
                    method = getattr(self._data, f"__{op.__name__}__")
                    result = method(other)
            if result is NotImplemented:
                result = invalid_comparison(self._data, other, op)
        # nans propagate
        if mask is None:
            mask = self._mask.copy()
        else:
            mask = self._mask | mask
        return BooleanArray(result, mask)
    def sum(self, *, skipna=True, min_count=0, **kwargs):
        # Validate numpy-compat kwargs, then defer to the masked reduction.
        nv.validate_sum((), kwargs)
        return super()._reduce("sum", skipna=skipna, min_count=min_count)
    def prod(self, *, skipna=True, min_count=0, **kwargs):
        nv.validate_prod((), kwargs)
        return super()._reduce("prod", skipna=skipna, min_count=min_count)
    def min(self, *, skipna=True, **kwargs):
        nv.validate_min((), kwargs)
        return super()._reduce("min", skipna=skipna)
    def max(self, *, skipna=True, **kwargs):
        nv.validate_max((), kwargs)
        return super()._reduce("max", skipna=skipna)
    def _maybe_mask_result(self, result, mask, other, op_name: str):
        """
        Wrap an op result and its mask back into a FloatingArray.

        Parameters
        ----------
        result : array-like
        mask : array-like bool
        other : scalar or array-like
        op_name : str
        """
        # TODO are there cases we don't end up with float?
        # if we have a float operand we are by-definition
        # a float result
        # or our op is a divide
        # if (is_float_dtype(other) or is_float(other)) or (
        #     op_name in ["rtruediv", "truediv"]
        # ):
        #     result[mask] = np.nan
        #     return result
        return type(self)(result, mask, copy=False)
# Template docstring shared by the concrete dtype classes below; each
# fills in its numpy dtype name via str.format.
_dtype_docstring = """
An ExtensionDtype for {dtype} data.
This dtype uses ``pd.NA`` as missing value indicator.
Attributes
----------
None
Methods
-------
None
"""
# create the concrete public Dtype subclasses and register their string
# aliases (e.g. dtype="Float32") with pandas
@register_extension_dtype
class Float32Dtype(FloatingDtype):
    type = np.float32
    name = "Float32"
    __doc__ = _dtype_docstring.format(dtype="float32")
@register_extension_dtype
class Float64Dtype(FloatingDtype):
    type = np.float64
    name = "Float64"
    __doc__ = _dtype_docstring.format(dtype="float64")
# Lookup from numpy dtype string to the singleton masked dtype instance;
# used by coerce_to_array and FloatingArray.dtype above.
FLOAT_STR_TO_DTYPE = {
    "float32": Float32Dtype(),
    "float64": Float64Dtype(),
}
| bsd-3-clause |
tylerjereddy/scipy | scipy/cluster/tests/test_hierarchy.py | 12 | 42543 | #
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
import pytest
from pytest import raises as assert_raises
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette, cut_tree, optimal_leaf_ordering,
_order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
from scipy.spatial.distance import pdist
from scipy.cluster._hierarchy import Heap
from . import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
    import matplotlib  # type: ignore[import]
    # and set the backend to be Agg (no gui)
    matplotlib.use('Agg')
    # before importing pyplot
    import matplotlib.pyplot as plt  # type: ignore[import]
    have_matplotlib = True
except Exception:
    # import or backend failure: plotting-dependent tests check this flag
    have_matplotlib = False
class TestLinkage:
    """Tests for scipy.cluster.hierarchy.linkage: input validation, agreement
    with precomputed reference linkages, and optimal leaf ordering."""
    def test_linkage_non_finite_elements_in_distance_matrix(self):
        # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
        # Exception expected.
        y = np.zeros((6,))
        y[0] = np.nan
        assert_raises(ValueError, linkage, y)
    def test_linkage_empty_distance_matrix(self):
        # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
        y = np.zeros((0,))
        assert_raises(ValueError, linkage, y)
    def test_linkage_tdist(self):
        for method in ['single', 'complete', 'average', 'weighted']:
            self.check_linkage_tdist(method)
    def check_linkage_tdist(self, method):
        # Tests linkage(Y, method) on the tdist data set.
        Z = linkage(hierarchy_test_data.ytdist, method)
        expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
        assert_allclose(Z, expectedZ, atol=1e-10)
    def test_linkage_X(self):
        for method in ['centroid', 'median', 'ward']:
            self.check_linkage_q(method)
    def check_linkage_q(self, method):
        # Tests linkage(Y, method) on the Q data set.
        Z = linkage(hierarchy_test_data.X, method)
        expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
        assert_allclose(Z, expectedZ, atol=1e-06)
        # The condensed distance matrix must give the same linkage as the
        # raw observation matrix.
        y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
                                         metric="euclidean")
        Z = linkage(y, method)
        assert_allclose(Z, expectedZ, atol=1e-06)
    def test_compare_with_trivial(self):
        # Every public method code must match the low-level C implementation.
        rng = np.random.RandomState(0)
        n = 20
        X = rng.rand(n, 2)
        d = pdist(X)
        for method, code in _LINKAGE_METHODS.items():
            Z_trivial = _hierarchy.linkage(d, n, code)
            Z = linkage(d, method)
            assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15)
    def test_optimal_leaf_ordering(self):
        Z = linkage(hierarchy_test_data.ytdist, optimal_ordering=True)
        expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
        assert_allclose(Z, expectedZ, atol=1e-10)
class TestLinkageTies:
    """Check that each linkage method resolves tied merge distances to the
    documented linkage matrix on a minimal 3-point collinear data set."""

    # Expected linkage matrices (rows of [idx1, idx2, dist, count]) per method.
    _expectations = {
        'single': np.array([[0, 1, 1.41421356, 2],
                            [2, 3, 1.41421356, 3]]),
        'complete': np.array([[0, 1, 1.41421356, 2],
                              [2, 3, 2.82842712, 3]]),
        'average': np.array([[0, 1, 1.41421356, 2],
                             [2, 3, 2.12132034, 3]]),
        'weighted': np.array([[0, 1, 1.41421356, 2],
                              [2, 3, 2.12132034, 3]]),
        'centroid': np.array([[0, 1, 1.41421356, 2],
                              [2, 3, 2.12132034, 3]]),
        'median': np.array([[0, 1, 1.41421356, 2],
                            [2, 3, 2.12132034, 3]]),
        'ward': np.array([[0, 1, 1.41421356, 2],
                          [2, 3, 2.44948974, 3]]),
    }

    def test_linkage_ties(self):
        # Iterate the expectation table itself instead of a duplicated
        # hard-coded method list, so the two can never drift apart.
        for method in self._expectations:
            self.check_linkage_ties(method)

    def check_linkage_ties(self, method):
        # Three collinear points: the two nearest-neighbor distances tie.
        X = np.array([[-1, -1], [0, 0], [1, 1]])
        Z = linkage(X, method=method)
        expectedZ = self._expectations[method]
        assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent:
    """Tests for inconsistent() against precomputed values on the tdist set."""
    def test_inconsistent_tdist(self):
        for depth in hierarchy_test_data.inconsistent_ytdist:
            self.check_inconsistent_tdist(depth)
    def check_inconsistent_tdist(self, depth):
        # Compare the inconsistency matrix at each recorded depth.
        Z = hierarchy_test_data.linkage_ytdist_single
        assert_allclose(inconsistent(Z, depth),
                        hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance:
    """Tests for cophenet() with and without the original condensed matrix."""
    def test_linkage_cophenet_tdist_Z(self):
        # Tests cophenet(Z) on tdist data set.
        expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
                              295, 138, 219, 295, 295])
        Z = hierarchy_test_data.linkage_ytdist_single
        M = cophenet(Z)
        assert_allclose(M, expectedM, atol=1e-10)
    def test_linkage_cophenet_tdist_Z_Y(self):
        # Tests cophenet(Z, Y) on tdist data set.
        Z = hierarchy_test_data.linkage_ytdist_single
        (c, M) = cophenet(Z, hierarchy_test_data.ytdist)
        expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
                              295, 138, 219, 295, 295])
        # c is the cophenetic correlation coefficient between Y and M.
        expectedc = 0.639931296433393415057366837573
        assert_allclose(c, expectedc, atol=1e-10)
        assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion:
    """Round-trip tests for from_mlab_linkage / to_mlab_linkage.

    MATLAB linkage rows are 1-based and carry no observation-count column;
    scipy rows are 0-based [idx1, idx2, dist, count].
    """

    def test_mlab_linkage_conversion_empty(self):
        # An empty linkage converts to an empty array in both directions.
        expected = np.asarray([])
        assert_equal(from_mlab_linkage([]), expected)
        assert_equal(to_mlab_linkage([]), expected)

    def test_mlab_linkage_conversion_single_row(self):
        # A single merge of observations 0 and 1 at distance 3.
        scipy_Z = np.asarray([[0., 1., 3., 2.]])
        mlab_Z = [[1, 2, 3]]
        assert_equal(from_mlab_linkage(mlab_Z), scipy_Z)
        assert_equal(to_mlab_linkage(scipy_Z), mlab_Z)

    def test_mlab_linkage_conversion_multiple_rows(self):
        # Five merges over six observations.
        mlab_Z = np.asarray([[3, 6, 138], [4, 5, 219],
                             [1, 8, 255], [2, 9, 268], [7, 10, 295]])
        scipy_Z = np.array([[2., 5., 138., 2.],
                            [3., 4., 219., 2.],
                            [0., 7., 255., 3.],
                            [1., 8., 268., 4.],
                            [6., 9., 295., 6.]],
                           dtype=np.double)
        assert_equal(from_mlab_linkage(mlab_Z), scipy_Z)
        assert_equal(to_mlab_linkage(scipy_Z), mlab_Z)
class TestFcluster:
    """Tests for fcluster/fclusterdata over the 'inconsistent', 'distance',
    'maxclust' and monocrit-based flat-clustering criteria."""
    def test_fclusterdata(self):
        for t in hierarchy_test_data.fcluster_inconsistent:
            self.check_fclusterdata(t, 'inconsistent')
        for t in hierarchy_test_data.fcluster_distance:
            self.check_fclusterdata(t, 'distance')
        for t in hierarchy_test_data.fcluster_maxclust:
            self.check_fclusterdata(t, 'maxclust')
    def check_fclusterdata(self, t, criterion):
        # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
        expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
        X = hierarchy_test_data.Q_X
        T = fclusterdata(X, criterion=criterion, t=t)
        # Labels may be permuted, so compare up to isomorphism.
        assert_(is_isomorphic(T, expectedT))
    def test_fcluster(self):
        for t in hierarchy_test_data.fcluster_inconsistent:
            self.check_fcluster(t, 'inconsistent')
        for t in hierarchy_test_data.fcluster_distance:
            self.check_fcluster(t, 'distance')
        for t in hierarchy_test_data.fcluster_maxclust:
            self.check_fcluster(t, 'maxclust')
    def check_fcluster(self, t, criterion):
        # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
        expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
        Z = single(hierarchy_test_data.Q_X)
        T = fcluster(Z, criterion=criterion, t=t)
        assert_(is_isomorphic(T, expectedT))
    def test_fcluster_monocrit(self):
        for t in hierarchy_test_data.fcluster_distance:
            self.check_fcluster_monocrit(t)
        for t in hierarchy_test_data.fcluster_maxclust:
            self.check_fcluster_maxclust_monocrit(t)
    def check_fcluster_monocrit(self, t):
        # 'monocrit' with maxdists(Z) must reproduce the 'distance' criterion.
        expectedT = hierarchy_test_data.fcluster_distance[t]
        Z = single(hierarchy_test_data.Q_X)
        T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
        assert_(is_isomorphic(T, expectedT))
    def check_fcluster_maxclust_monocrit(self, t):
        # 'maxclust_monocrit' with maxdists(Z) must reproduce 'maxclust'.
        expectedT = hierarchy_test_data.fcluster_maxclust[t]
        Z = single(hierarchy_test_data.Q_X)
        T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
        assert_(is_isomorphic(T, expectedT))
class TestLeaders:
    """Tests for leaders() on a flat clustering of the Q data set."""
    def test_leaders_single(self):
        # Tests leaders using a flat clustering generated by single linkage.
        X = hierarchy_test_data.Q_X
        Y = pdist(X)
        Z = linkage(Y)
        T = fcluster(Z, criterion='maxclust', t=3)
        # Expected (leader node ids, corresponding flat cluster ids).
        Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
        L = leaders(Z, T)
        assert_equal(L, Lright)
class TestIsIsomorphic:
    """Tests for is_isomorphic(): flat clusterings that differ only by a
    relabeling must compare equal; genuinely different ones must not."""
    def test_is_isomorphic_1(self):
        # Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
        a = [1, 1, 1]
        b = [2, 2, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))
    def test_is_isomorphic_2(self):
        # Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
        a = [1, 7, 1]
        b = [2, 3, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))
    def test_is_isomorphic_3(self):
        # Tests is_isomorphic on test case #3 (no flat clusters)
        a = []
        b = []
        assert_(is_isomorphic(a, b))
    def test_is_isomorphic_4A(self):
        # Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
        a = [1, 2, 3]
        b = [1, 3, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))
    def test_is_isomorphic_4B(self):
        # Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
        a = [1, 2, 3, 3]
        b = [1, 3, 2, 3]
        assert_(is_isomorphic(a, b) == False)
        assert_(is_isomorphic(b, a) == False)
    def test_is_isomorphic_4C(self):
        # Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
        a = [7, 2, 3]
        b = [6, 3, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))
    def test_is_isomorphic_5(self):
        # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
        # clusters, random permutation of the labeling).
        for nc in [2, 3, 5]:
            self.help_is_isomorphic_randperm(1000, nc)
    def test_is_isomorphic_6(self):
        # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
        # clusters, random permutation of the labeling, slightly
        # nonisomorphic.)
        for nc in [2, 3, 5]:
            self.help_is_isomorphic_randperm(1000, nc, True, 5)
    def test_is_isomorphic_7(self):
        # Regression test for gh-6271
        assert_(not is_isomorphic([1, 2, 3], [1, 1, 1]))
    def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
        # Build b as a random relabeling of a; optionally corrupt `nerrors`
        # entries so the pair becomes non-isomorphic.
        for k in range(3):
            a = np.int_(np.random.rand(nobs) * nclusters)
            b = np.zeros(a.size, dtype=np.int_)
            P = np.random.permutation(nclusters)
            for i in range(0, a.shape[0]):
                b[i] = P[a[i]]
            if noniso:
                Q = np.random.permutation(nobs)
                b[Q[0:nerrors]] += 1
                b[Q[0:nerrors]] %= nclusters
            assert_(is_isomorphic(a, b) == (not noniso))
            assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage:
    """Tests for is_valid_linkage(): shape, dtype, and value constraints on
    linkage matrices, with and without throw=True."""
    def test_is_valid_linkage_various_size(self):
        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
                                  (1, 4, True), (2, 4, True)]:
            self.check_is_valid_linkage_various_size(nrow, ncol, valid)
    def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
        # Tests is_valid_linkage(Z) with linkage matrics of various sizes
        Z = np.asarray([[0, 1, 3.0, 2, 5],
                        [3, 2, 4.0, 3, 3]], dtype=np.double)
        Z = Z[:nrow, :ncol]
        assert_(is_valid_linkage(Z) == valid)
        if not valid:
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_int_type(self):
        # Tests is_valid_linkage(Z) with integer type.
        Z = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=int)
        assert_(is_valid_linkage(Z) == False)
        assert_raises(TypeError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_empty(self):
        # Tests is_valid_linkage(Z) with empty linkage.
        Z = np.zeros((0, 4), dtype=np.double)
        assert_(is_valid_linkage(Z) == False)
        assert_raises(ValueError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_4_and_up(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3).
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            assert_(is_valid_linkage(Z) == True)
    def test_is_valid_linkage_4_and_up_neg_index_left(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative indices (left).
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            Z[i//2,0] = -2
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_4_and_up_neg_index_right(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative indices (right).
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            Z[i//2,1] = -2
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_4_and_up_neg_dist(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative distances.
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            Z[i//2,2] = -0.5
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_4_and_up_neg_counts(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative counts.
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            Z[i//2,3] = -2
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent:
    """Tests for is_valid_im(): shape, dtype, and value constraints on
    inconsistency matrices, with and without throw=True."""
    def test_is_valid_im_int_type(self):
        # Tests is_valid_im(R) with integer type.
        R = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=int)
        assert_(is_valid_im(R) == False)
        assert_raises(TypeError, is_valid_im, R, throw=True)
    def test_is_valid_im_various_size(self):
        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
                                  (1, 4, True), (2, 4, True)]:
            self.check_is_valid_im_various_size(nrow, ncol, valid)
    def check_is_valid_im_various_size(self, nrow, ncol, valid):
        # Tests is_valid_im(R) with linkage matrics of various sizes
        R = np.asarray([[0, 1, 3.0, 2, 5],
                        [3, 2, 4.0, 3, 3]], dtype=np.double)
        R = R[:nrow, :ncol]
        assert_(is_valid_im(R) == valid)
        if not valid:
            assert_raises(ValueError, is_valid_im, R, throw=True)
    def test_is_valid_im_empty(self):
        # Tests is_valid_im(R) with empty inconsistency matrix.
        R = np.zeros((0, 4), dtype=np.double)
        assert_(is_valid_im(R) == False)
        assert_raises(ValueError, is_valid_im, R, throw=True)
    def test_is_valid_im_4_and_up(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3).
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            assert_(is_valid_im(R) == True)
    def test_is_valid_im_4_and_up_neg_index_left(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3) with negative link height means.
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            R[i//2,0] = -2.0
            assert_(is_valid_im(R) == False)
            assert_raises(ValueError, is_valid_im, R, throw=True)
    def test_is_valid_im_4_and_up_neg_index_right(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3) with negative link height standard deviations.
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            R[i//2,1] = -2.0
            assert_(is_valid_im(R) == False)
            assert_raises(ValueError, is_valid_im, R, throw=True)
    def test_is_valid_im_4_and_up_neg_dist(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3) with negative link counts.
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            R[i//2,2] = -0.5
            assert_(is_valid_im(R) == False)
            assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage:
    """Tests for num_obs_linkage(): observation count recovered from a
    linkage matrix (n merges => n+1 observations)."""
    def test_num_obs_linkage_empty(self):
        # Tests num_obs_linkage(Z) with empty linkage.
        Z = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, num_obs_linkage, Z)
    def test_num_obs_linkage_1x4(self):
        # Tests num_obs_linkage(Z) on linkage over 2 observations.
        Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
        assert_equal(num_obs_linkage(Z), 2)
    def test_num_obs_linkage_2x4(self):
        # Tests num_obs_linkage(Z) on linkage over 3 observations.
        Z = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=np.double)
        assert_equal(num_obs_linkage(Z), 3)
    def test_num_obs_linkage_4_and_up(self):
        # Tests num_obs_linkage(Z) on linkage on observation sets between sizes
        # 4 and 15 (step size 3).
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            assert_equal(num_obs_linkage(Z), i)
class TestLeavesList:
    """Tests that leaves_list() agrees with a pre-order traversal of the
    tree produced by to_tree()."""
    def test_leaves_list_1x4(self):
        # Tests leaves_list(Z) on a 1x4 linkage.
        Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
        to_tree(Z)
        assert_equal(leaves_list(Z), [0, 1])
    def test_leaves_list_2x4(self):
        # Tests leaves_list(Z) on a 2x4 linkage.
        Z = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=np.double)
        to_tree(Z)
        assert_equal(leaves_list(Z), [0, 1, 2])
    def test_leaves_list_Q(self):
        for method in ['single', 'complete', 'average', 'weighted', 'centroid',
                       'median', 'ward']:
            self.check_leaves_list_Q(method)
    def check_leaves_list_Q(self, method):
        # Tests leaves_list(Z) on the Q data set
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        node = to_tree(Z)
        assert_equal(node.pre_order(), leaves_list(Z))
    def test_Q_subtree_pre_order(self):
        # Tests that pre_order() works when called on sub-trees.
        X = hierarchy_test_data.Q_X
        Z = linkage(X, 'single')
        node = to_tree(Z)
        assert_equal(node.pre_order(), (node.get_left().pre_order()
                                        + node.get_right().pre_order()))
class TestCorrespond:
    """Tests for correspond(): whether a linkage matrix and a condensed
    distance matrix describe the same number of observations."""
    def test_correspond_empty(self):
        # Tests correspond(Z, y) with empty linkage and condensed distance matrix.
        y = np.zeros((0,))
        Z = np.zeros((0,4))
        assert_raises(ValueError, correspond, Z, y)
    def test_correspond_2_and_up(self):
        # Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes.
        for i in range(2, 4):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            assert_(correspond(Z, y))
        for i in range(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            assert_(correspond(Z, y))
    def test_correspond_4_and_up(self):
        # Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
        for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
                       list(zip(list(range(3, 5)), list(range(2, 4))))):
            y = np.random.rand(i*(i-1)//2)
            y2 = np.random.rand(j*(j-1)//2)
            Z = linkage(y)
            Z2 = linkage(y2)
            assert_equal(correspond(Z, y2), False)
            assert_equal(correspond(Z2, y), False)
    def test_correspond_4_and_up_2(self):
        # Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
        for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
                       list(zip(list(range(2, 7)), list(range(16, 21))))):
            y = np.random.rand(i*(i-1)//2)
            y2 = np.random.rand(j*(j-1)//2)
            Z = linkage(y)
            Z2 = linkage(y2)
            assert_equal(correspond(Z, y2), False)
            assert_equal(correspond(Z2, y), False)
    def test_num_obs_linkage_multi_matrix(self):
        # Tests num_obs_linkage with observation matrices of multiple sizes.
        for n in range(2, 10):
            X = np.random.rand(n, 4)
            Y = pdist(X)
            Z = linkage(Y)
            assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic:
    """Tests for is_monotonic(): merge distances in a linkage must be
    non-decreasing from one row to the next."""
    def test_is_monotonic_empty(self):
        # Tests is_monotonic(Z) on an empty linkage.
        Z = np.zeros((0, 4))
        assert_raises(ValueError, is_monotonic, Z)
    def test_is_monotonic_1x4(self):
        # Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
        Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
        assert_equal(is_monotonic(Z), True)
    def test_is_monotonic_2x4_T(self):
        # Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 3]], dtype=np.double)
        assert_equal(is_monotonic(Z), True)
    def test_is_monotonic_2x4_F(self):
        # Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
        Z = np.asarray([[0, 1, 0.4, 2],
                        [2, 3, 0.3, 3]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)
    def test_is_monotonic_3x4_T(self):
        # Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.6, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), True)
    def test_is_monotonic_3x4_F1(self):
        # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.2, 2],
                        [4, 5, 0.6, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)
    def test_is_monotonic_3x4_F2(self):
        # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
        Z = np.asarray([[0, 1, 0.8, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.6, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)
    def test_is_monotonic_3x4_F3(self):
        # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.2, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)
    def test_is_monotonic_tdist_linkage1(self):
        # Tests is_monotonic(Z) on clustering generated by single linkage on
        # tdist data set. Expecting True.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        assert_equal(is_monotonic(Z), True)
    def test_is_monotonic_tdist_linkage2(self):
        # Tests is_monotonic(Z) on clustering generated by single linkage on
        # tdist data set. Perturbing. Expecting False.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        Z[2,2] = 0.0
        assert_equal(is_monotonic(Z), False)
    def test_is_monotonic_Q_linkage(self):
        # Tests is_monotonic(Z) on clustering generated by single linkage on
        # Q data set. Expecting True.
        X = hierarchy_test_data.Q_X
        Z = linkage(X, 'single')
        assert_equal(is_monotonic(Z), True)
class TestMaxDists:
    """Tests for scipy.cluster.hierarchy.maxdists."""

    def test_maxdists_empty_linkage(self):
        """An empty (0x4) linkage matrix must raise ValueError."""
        empty = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, maxdists, empty)

    def test_maxdists_one_cluster_linkage(self):
        """maxdists on a single-merge linkage matches the brute-force reference."""
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        assert_allclose(maxdists(Z), calculate_maximum_distances(Z), atol=1e-15)

    def test_maxdists_Q_linkage(self):
        """Exercise maxdists for several linkage methods on the Q data set."""
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            self.check_maxdists_Q_linkage(method)

    def check_maxdists_Q_linkage(self, method):
        """Compare maxdists against the brute-force reference on Q_X."""
        Z = linkage(hierarchy_test_data.Q_X, method)
        assert_allclose(maxdists(Z), calculate_maximum_distances(Z), atol=1e-15)
class TestMaxInconsts:
    """Tests for scipy.cluster.hierarchy.maxinconsts."""

    def test_maxinconsts_empty_linkage(self):
        """Empty linkage/inconsistency matrices must raise ValueError."""
        empty_Z = np.zeros((0, 4), dtype=np.double)
        empty_R = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, maxinconsts, empty_Z, empty_R)

    def test_maxinconsts_difrow_linkage(self):
        """Mismatched row counts between Z and R must raise ValueError."""
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.random.rand(2, 4)
        assert_raises(ValueError, maxinconsts, Z, R)

    def test_maxinconsts_one_cluster_linkage(self):
        """maxinconsts on a single-merge linkage matches the reference."""
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        assert_allclose(maxinconsts(Z, R),
                        calculate_maximum_inconsistencies(Z, R), atol=1e-15)

    def test_maxinconsts_Q_linkage(self):
        """Exercise maxinconsts for several linkage methods on the Q data set."""
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            self.check_maxinconsts_Q_linkage(method)

    def check_maxinconsts_Q_linkage(self, method):
        """Compare maxinconsts against the brute-force reference on Q_X."""
        Z = linkage(hierarchy_test_data.Q_X, method)
        R = inconsistent(Z)
        assert_allclose(maxinconsts(Z, R),
                        calculate_maximum_inconsistencies(Z, R), atol=1e-15)
class TestMaxRStat:
    """Tests for scipy.cluster.hierarchy.maxRstat."""

    def test_maxRstat_invalid_index(self):
        for i in [3.3, -1, 4]:
            self.check_maxRstat_invalid_index(i)

    def check_maxRstat_invalid_index(self, i):
        # Tests maxRstat(Z, R, i). Expecting an exception: TypeError for a
        # non-integer index, ValueError for an out-of-range integer.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        if isinstance(i, int):
            assert_raises(ValueError, maxRstat, Z, R, i)
        else:
            assert_raises(TypeError, maxRstat, Z, R, i)

    def test_maxRstat_empty_linkage(self):
        for i in range(4):
            self.check_maxRstat_empty_linkage(i)

    def check_maxRstat_empty_linkage(self, i):
        # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
        Z = np.zeros((0, 4), dtype=np.double)
        R = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, maxRstat, Z, R, i)

    def test_maxRstat_difrow_linkage(self):
        for i in range(4):
            self.check_maxRstat_difrow_linkage(i)

    def check_maxRstat_difrow_linkage(self, i):
        # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
        # different numbers of clusters. Expecting exception.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.random.rand(2, 4)
        assert_raises(ValueError, maxRstat, Z, R, i)

    def test_maxRstat_one_cluster_linkage(self):
        for i in range(4):
            self.check_maxRstat_one_cluster_linkage(i)

    def check_maxRstat_one_cluster_linkage(self, i):
        # Tests maxRstat(Z, R, i) on linkage with one cluster.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        # BUG FIX: the column index was hard-coded to 1, silently ignoring
        # the loop variable i and leaving columns 0, 2 and 3 untested.
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
        assert_allclose(MD, expectedMD, atol=1e-15)

    def test_maxRstat_Q_linkage(self):
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            for i in range(4):
                self.check_maxRstat_Q_linkage(method, i)

    def check_maxRstat_Q_linkage(self, method, i):
        # Tests maxRstat(Z, R, i) on the Q data set.
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        R = inconsistent(Z)
        # BUG FIX: use the loop's column index i instead of hard-coded 1.
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
        assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram:
    """Tests for dendrogram() return values and matplotlib plotting."""

    def test_dendrogram_single_linkage_tdist(self):
        # Tests dendrogram calculation on single linkage of the tdist data set.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        R = dendrogram(Z, no_plot=True)
        leaves = R["leaves"]
        assert_equal(leaves, [2, 5, 1, 0, 3, 4])

    def test_valid_orientation(self):
        # An unknown orientation string must be rejected.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        assert_raises(ValueError, dendrogram, Z, orientation="foo")

    def test_labels_as_array_or_list(self):
        # test for gh-12418: array labels and list labels give identical output
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        labels = np.array([1, 3, 2, 6, 4, 5])
        result1 = dendrogram(Z, labels=labels, no_plot=True)
        result2 = dendrogram(Z, labels=labels.tolist(), no_plot=True)
        assert result1 == result2

    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
    def test_valid_label_size(self):
        # labels whose length disagrees with the linkage must raise ValueError
        link = np.array([
            [0, 1, 1.0, 4],
            [2, 3, 1.0, 5],
            [4, 5, 2.0, 6],
        ])
        plt.figure()
        with pytest.raises(ValueError) as exc_info:
            dendrogram(link, labels=list(range(100)))
        assert "Dimensions of Z and labels must be consistent."\
               in str(exc_info.value)
        with pytest.raises(
                ValueError,
                match="Dimensions of Z and labels must be consistent."):
            dendrogram(link, labels=[])
        plt.close()

    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
    def test_dendrogram_plot(self):
        for orientation in ['top', 'bottom', 'left', 'right']:
            self.check_dendrogram_plot(orientation)

    def check_dendrogram_plot(self, orientation):
        # Tests dendrogram plotting; expected dict holds the exact coordinates
        # and colors produced for the tdist single-linkage tree.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
                    'dcoord': [[0.0, 138.0, 138.0, 0.0],
                               [0.0, 219.0, 219.0, 0.0],
                               [0.0, 255.0, 255.0, 219.0],
                               [0.0, 268.0, 268.0, 255.0],
                               [138.0, 295.0, 295.0, 268.0]],
                    'icoord': [[5.0, 5.0, 15.0, 15.0],
                               [45.0, 45.0, 55.0, 55.0],
                               [35.0, 35.0, 50.0, 50.0],
                               [25.0, 25.0, 42.5, 42.5],
                               [10.0, 10.0, 33.75, 33.75]],
                    'ivl': ['2', '5', '1', '0', '3', '4'],
                    'leaves': [2, 5, 1, 0, 3, 4],
                    'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
                    }
        fig = plt.figure()
        ax = fig.add_subplot(221)
        # test that dendrogram accepts ax keyword
        R1 = dendrogram(Z, ax=ax, orientation=orientation)
        assert_equal(R1, expected)
        # test that dendrogram accepts and handle the leaf_font_size and
        # leaf_rotation keywords
        dendrogram(Z, ax=ax, orientation=orientation,
                   leaf_font_size=20, leaf_rotation=90)
        # tick labels live on the x axis for top/bottom, y axis otherwise
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_rotation(), 90)
        assert_equal(testlabel.get_size(), 20)
        dendrogram(Z, ax=ax, orientation=orientation,
                   leaf_rotation=90)
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_rotation(), 90)
        dendrogram(Z, ax=ax, orientation=orientation,
                   leaf_font_size=20)
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_size(), 20)
        plt.close()
        # test plotting to gca (will import pylab)
        R2 = dendrogram(Z, orientation=orientation)
        plt.close()
        assert_equal(R2, expected)

    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
    def test_dendrogram_truncate_mode(self):
        # 'lastp' collapses to the last p merges; 'mtica' keeps more leaves.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        R = dendrogram(Z, 2, 'lastp', show_contracted=True)
        plt.close()
        assert_equal(R, {'color_list': ['C0'],
                         'dcoord': [[0.0, 295.0, 295.0, 0.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0]],
                         'ivl': ['(2)', '(4)'],
                         'leaves': [6, 9],
                         'leaves_color_list': ['C0', 'C0'],
                         })
        R = dendrogram(Z, 2, 'mtica', show_contracted=True)
        plt.close()
        assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
                         'dcoord': [[0.0, 138.0, 138.0, 0.0],
                                    [0.0, 255.0, 255.0, 0.0],
                                    [0.0, 268.0, 268.0, 255.0],
                                    [138.0, 295.0, 295.0, 268.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0],
                                    [35.0, 35.0, 45.0, 45.0],
                                    [25.0, 25.0, 40.0, 40.0],
                                    [10.0, 10.0, 32.5, 32.5]],
                         'ivl': ['2', '5', '1', '0', '(2)'],
                         'leaves': [2, 5, 1, 0, 7],
                         'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
                         })

    def test_dendrogram_colors(self):
        # Tests dendrogram plots with alternate colors
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        set_link_color_palette(['c', 'm', 'y', 'k'])
        R = dendrogram(Z, no_plot=True,
                       above_threshold_color='g', color_threshold=250)
        set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
        color_list = R['color_list']
        assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
        # reset color palette (global list)
        set_link_color_palette(None)
def calculate_maximum_distances(Z):
    """Brute-force reference for maxdists.

    For each row of the linkage matrix ``Z`` return the largest merge
    distance found in the subtree rooted at that merge.
    """
    n = Z.shape[0] + 1
    B = np.zeros((n - 1,))
    for row in range(n - 1):
        # Best of this merge's own distance and the recorded maxima of any
        # non-leaf children; leaf children contribute the implicit 0.0.
        best = Z[row, 2]
        for col in (0, 1):
            child = Z[row, col]
            cand = B[int(child) - n] if child >= n else 0.0
            if cand > best:
                best = cand
        B[row] = best
    return B
def calculate_maximum_inconsistencies(Z, R, k=3):
    """Brute-force reference for maxinconsts/maxRstat.

    For each merge in ``Z`` return the largest value of column ``k`` of the
    inconsistency matrix ``R`` over the subtree rooted at that merge.
    """
    n = Z.shape[0] + 1
    B = np.zeros((n - 1,))
    for row in range(n - 1):
        # This merge's own statistic vs. any non-leaf child's recorded
        # maximum; leaf children contribute the implicit 0.0.
        best = R[row, k]
        for col in (0, 1):
            child = Z[row, col]
            cand = B[int(child) - n] if child >= n else 0.0
            if cand > best:
                best = cand
        B[row] = best
    return B
def within_tol(a, b, tol):
    """True when every element of ``|a - b|`` is strictly below ``tol``."""
    worst = np.abs(a - b).max()
    return worst < tol
def test_unsupported_uncondensed_distance_matrix_linkage_warning():
    """Passing a square (uncondensed) distance matrix triggers ClusterWarning."""
    assert_warns(ClusterWarning, linkage, [[0, 1], [1, 0]])
def test_euclidean_linkage_value_error():
    """Euclidean-only linkage methods must reject a non-euclidean metric."""
    for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
        assert_raises(ValueError, linkage, [[1, 1], [1, 1]],
                      method=method, metric='cityblock')
def test_2x2_linkage():
    """A 1-entry condensed vector and the matching 2x2 square matrix agree."""
    Z1 = linkage([1], method='single', metric='euclidean')
    Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
    assert_allclose(Z1, Z2)
def test_node_compare():
    """ClusterNode comparison operators order nodes (by distance/id)."""
    np.random.seed(23)  # fixed seed so the tree shape is reproducible
    nobs = 50
    X = np.random.randn(nobs, 4)
    Z = scipy.cluster.hierarchy.ward(X)
    tree = to_tree(Z)
    # the root compares greater than either child; a node equals itself
    assert_(tree > tree.get_left())
    assert_(tree.get_right() > tree.get_left())
    assert_(tree.get_right() == tree.get_right())
    assert_(tree.get_right() != tree.get_left())
def test_cut_tree():
    """cut_tree by n_clusters and by height agree with the full cut matrix."""
    np.random.seed(23)  # fixed seed so the tree is reproducible
    nobs = 50
    X = np.random.randn(nobs, 4)
    Z = scipy.cluster.hierarchy.ward(X)
    cutree = cut_tree(Z)
    # column 0 is the all-singletons cut; the last column is one cluster
    assert_equal(cutree[:, 0], np.arange(nobs))
    assert_equal(cutree[:, -1], np.zeros(nobs))
    assert_equal(cutree.max(0), np.arange(nobs - 1, -1, -1))
    # selecting by n_clusters (single and multiple, order-independent)
    assert_equal(cutree[:, [-5]], cut_tree(Z, n_clusters=5))
    assert_equal(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]))
    assert_equal(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]))
    # selecting by height matches the column found via the merge heights
    nodes = _order_cluster_tree(Z)
    heights = np.array([node.dist for node in nodes])
    assert_equal(cutree[:, np.searchsorted(heights, [5])],
                 cut_tree(Z, height=5))
    assert_equal(cutree[:, np.searchsorted(heights, [5, 10])],
                 cut_tree(Z, height=[5, 10]))
    assert_equal(cutree[:, np.searchsorted(heights, [10, 5])],
                 cut_tree(Z, height=[10, 5]))
def test_optimal_leaf_ordering():
    """optimal_leaf_ordering matches precomputed references for y and X input."""
    # test with the distance vector y
    Z = optimal_leaf_ordering(linkage(hierarchy_test_data.ytdist),
                              hierarchy_test_data.ytdist)
    expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
    assert_allclose(Z, expectedZ, atol=1e-10)
    # test with the observation matrix X
    Z = optimal_leaf_ordering(linkage(hierarchy_test_data.X, 'ward'),
                              hierarchy_test_data.X)
    expectedZ = hierarchy_test_data.linkage_X_ward_olo
    assert_allclose(Z, expectedZ, atol=1e-06)
def test_Heap():
    """Exercise the internal Heap: get_min, remove_min and change_value.

    The assertions track the heap state step by step; each (key, value)
    pair is the index into `values` and the stored value.
    """
    values = np.array([2, -1, 0, -1.5, 3])
    heap = Heap(values)
    pair = heap.get_min()
    assert_equal(pair['key'], 3)
    assert_equal(pair['value'], -1.5)
    heap.remove_min()
    pair = heap.get_min()
    assert_equal(pair['key'], 1)
    assert_equal(pair['value'], -1)
    # raising key 1 from -1 to 2.5 promotes key 2 (value 0) to the minimum
    heap.change_value(1, 2.5)
    pair = heap.get_min()
    assert_equal(pair['key'], 2)
    assert_equal(pair['value'], 0)
    heap.remove_min()
    heap.remove_min()
    heap.change_value(1, 10)
    pair = heap.get_min()
    assert_equal(pair['key'], 4)
    assert_equal(pair['value'], 3)
    heap.remove_min()
    pair = heap.get_min()
    assert_equal(pair['key'], 1)
    assert_equal(pair['value'], 10)
| bsd-3-clause |
jlegendary/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause

import matplotlib.pyplot as plt
import numpy as np

from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets

iris = datasets.load_iris()
X = iris.data[:, 0:2]  # we only take the first two features for visualization
y = iris.target

n_features = X.shape[1]

C = 1.0  # shared regularization strength for all classifiers

# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
               'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
               'Linear SVC': SVC(kernel='linear', C=C, probability=True,
                                 random_state=0),
               'L2 logistic (Multinomial)': LogisticRegression(
                   C=C, solver='lbfgs', multi_class='multinomial'
               )}

n_classifiers = len(classifiers)

plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)

# Evaluation grid over the plotted feature ranges (x in [3, 9], y in [1, 5])
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]

for index, (name, classifier) in enumerate(classifiers.items()):
    classifier.fit(X, y)

    y_pred = classifier.predict(X)
    classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
    print("classif_rate for %s : %f " % (name, classif_rate))

    # View probabilities over the whole grid, one subplot per class
    probas = classifier.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    for k in range(n_classes):
        plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
        plt.title("Class %d" % k)
        if k == 0:
            plt.ylabel(name)
        imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
                                   extent=(3, 9, 1, 5), origin='lower')
        plt.xticks(())
        plt.yticks(())
        # overlay the training points the classifier assigned to class k
        idx = (y_pred == k)
        if idx.any():
            plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')

# shared horizontal colorbar below all subplots
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')

plt.show()
| bsd-3-clause |
rohanp/scikit-learn | sklearn/cross_decomposition/cca_.py | 151 | 3192 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
    """CCA Canonical Correlation Analysis.

    CCA inherits from PLS with mode="B" and deflation_mode="canonical".

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, (default 2).
        Number of components to keep.

    scale : boolean, (default True)
        Whether to scale the data.

    max_iter : an integer, (default 500)
        The maximum number of iterations of the NIPALS inner loop.

    tol : non-negative real, default 1e-06.
        The tolerance used in the iterative algorithm.

    copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.

    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.

    y_weights_ : array, [q, n_components]
        Y block weights vectors.

    x_loadings_ : array, [p, n_components]
        X block loadings vectors.

    y_loadings_ : array, [q, n_components]
        Y block loadings vectors.

    x_scores_ : array, [n_samples, n_components]
        X scores.

    y_scores_ : array, [n_samples, n_components]
        Y scores.

    x_rotations_ : array, [p, n_components]
        X block to latents rotations.

    y_rotations_ : array, [q, n_components]
        Y block to latents rotations.

    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component.

    Notes
    -----
    For each component k, find the weights u, v that maximizes
    max corr(Xk u, Yk v), such that ``|u| = |v| = 1``

    Note that it maximizes only the correlations between the scores.

    The residual matrix of X (Xk+1) block is obtained by the deflation on the
    current X score: x_score.

    The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score.

    Examples
    --------
    >>> from sklearn.cross_decomposition import CCA
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> cca = CCA(n_components=1)
    >>> cca.fit(X, Y)
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
    >>> X_c, Y_c = cca.transform(X, Y)

    References
    ----------

    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.

    In french but still a reference:
    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.

    See also
    --------
    PLSCanonical
    PLSSVD
    """

    def __init__(self, n_components=2, scale=True,
                 max_iter=500, tol=1e-06, copy=True):
        # Configure the generic _PLS solver for the CCA special case:
        # mode "B" with canonical deflation and normalized Y weights.
        super(CCA, self).__init__(n_components=n_components, scale=scale,
                                  deflation_mode="canonical", mode="B",
                                  norm_y_weights=True, algorithm="nipals",
                                  max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
Atmosferica/Turbolenza | scripts/KDE/main.py | 1 | 1050 | #!/usr/bin/python
#default module
import numpy as np
import matplotlib.pyplot as plt
import scipy.fftpack as fftp
import scipy.optimize as opt
import sys
import os
import string
from statsmodels.nonparametric.kernel_density import KDEMultivariate
from bcolors import *
from funct import *
def kde_m(x, x_grid, bandwidth):
    """Return the multivariate (continuous) KDE of ``x`` evaluated on ``x_grid``."""
    estimator = KDEMultivariate(x, bw=bandwidth, var_type='c')
    return estimator.pdf(x_grid)
if __name__ == '__main__':
    # CLI entry point: expects exactly one argument, the data file to load.
    print "KDE and KD estimation"
    print "Usage: ./main.py [File]\n"
    n = len(sys.argv)
    if n < 2:
        print_fail("Error: too few arguments")
        exit(1)
    if n > 2:
        print_fail("Error: too many arguments")
        exit(1)
    # load_file comes from funct.py; presumably returns the four data columns
    x,y,z,t=load_file(sys.argv[1])
    # strip directory and keep "basename.firstext" of the input path
    name=sys.argv[1]
    name=name.split('/')
    name=name[len(name)-1]
    name=name.split('.')[0]+"."+name.split('.')[1]
    # NOTE(review): z_grid (and kde_m above) are never used here, and `name`
    # is built but unused -- looks like leftover scaffolding; confirm intent.
    z_grid = np.linspace(-0.2, 0.2, 500)
    plt.figure(1)
    plt.hist(z,bins='auto')
    plt.show()
| gpl-3.0 |
FYECorpusProject/thesaurus-and-citation | Histograms20150820/histoparagraphone.py | 2 | 38019 | #!/usr/bin/env python ## use python 2.7
import sys
from collections import defaultdict
from os import listdir
from histogramcode import Histogram
from histosentence import HistoSentence
##
import numpy as np
import matplotlib.pyplot as plt
# Sentinel values marking missing alignment/distance information.
ALIGNMENTDUMMYPARA = -9
ALIGNMENTDUMMYSUB = -99
DISTANCEDUMMY = -999
######################################################################
## FUNCTIONS
######################################################################
## check arguments
def checkArgs(number, message):
    """Print ``message`` and exit(1) unless sys.argv has exactly ``number`` entries."""
    if len(sys.argv) == number:
        return
    print(message)
    sys.exit(1)
######################################################################
## filter the data by type and category
## 'ALIGN', 'DELETIONS', 'INSERTIONS', and types 1, 2, 3, or 4
def filterTheData(theData, whichEvent, whichType, alignChoice=0):
    """Build a {value: count} histogram dict from rows of ``theData``.

    A row is used when ``whichEvent`` is a substring of row[0] and
    ``whichType`` is 0 (wildcard) or equals int(row[2]).  Which columns
    feed the histogram depends on the event family:

    * EDITDISTS / INSERTBYDIST / INSERTIONS / DELETIONS: key row[3],
      weight row[4].
    * SENTENCELENGTHDRAFT / SENTENCELENGTHFINAL: key row[3], weight 1.
    * ALIGNMENT_COUNT: selected by ``alignChoice`` -- 0 = phase count
      (row[3] - 1), 1 = percent aligned on the draft side,
      2 = percent aligned on the final side.
    """
    histoDict = defaultdict(int)
    for line in theData:
        # print('FILTERA %s' % (line))
        if whichEvent not in line[0]: continue
        # print('FILTERB %s' % (line))
        if (whichType == 0) or (whichType == int(line[2])):
            # print('FILTERC %s' % (line))
            if ('EDITDISTS' in whichEvent) or \
               ('INSERTBYDIST' in whichEvent) or \
               ('INSERTIONS' in whichEvent) or \
               ('DELETIONS' in whichEvent):
                # print('FILTERD %s' % (line))
                histoDict[int(line[3])] += int(line[4])
            if ('SENTENCELENGTHDRAFT' in whichEvent) or \
               ('SENTENCELENGTHFINAL' in whichEvent):
                # print('FILTERE %s' % (line))
                histoDict[int(line[3])] += 1
            if ('ALIGNMENT_COUNT' in whichEvent):
                if 0 == alignChoice:
                    # print('FILTERE %s' % (line))
                    # subtract 1 from value because we want the number of
                    # useful phases and not the phase when nothing happened
                    histoDict[int(line[3])-1] += 1
                if 1 == alignChoice:
                    # print('FILTERF %s' % (line))
                    # subtract 1 from value because we want the number of
                    # useful phases and not the phase when nothing happened
                    sentCount = int(line[4]) + int(line[5]) # draft
                    if 0 != sentCount:
                        fractionAligned = 100.0 * float(line[4]) / float(sentCount)
                    else:
                        fractionAligned = 0.0
                    # print('FILTERF %d %d %d %f' % (int(line[4]), int(line[5]), int(line[6]), fractionAligned))
                    histoDict[int(fractionAligned)] += 1
                if 2 == alignChoice:
                    # print('FILTERG %s' % (line))
                    # subtract 1 from value because we want the number of
                    # useful phases and not the phase when nothing happened
                    sentCount = int(line[4]) + int(line[6]) # final
                    if 0 != sentCount:
                        fractionAligned = 100.0 * float(line[4]) / float(sentCount)
                    else:
                        fractionAligned = 0.0
                    # print('FILTERG %d %d %d %f' % (int(line[4]), int(line[5]), int(line[6]), fractionAligned))
                    histoDict[int(fractionAligned)] += 1
    return histoDict
######################################################################
## filter the data by type and category
## 'ALIGN', 'DELETIONS', 'INSERTIONS', and types 1, 2, 3, 4, ...
def filterTheData2(theData, whichEvent, whichType, whichVersion):
    """Histogram SENTENCECOUNTS/PARACOUNTS rows into a {value: count} dict.

    Rows are kept when ``whichEvent`` is a substring of row[0] and
    ``whichType`` is 0 (wildcard) or equals int(row[2]).  For 'draft' the
    key is column 3, for 'final' column 4; any other version aborts.
    """
    counts = defaultdict(int)
    for record in theData:
        if whichEvent not in record[0]:
            continue
        if whichType != 0 and whichType != int(record[2]):
            continue
        if ('SENTENCECOUNTS' not in whichEvent) and ('PARACOUNTS' not in whichEvent):
            continue
        if whichVersion == 'draft':
            counts[int(record[3])] += 1
        elif whichVersion == 'final':
            counts[int(record[4])] += 1
        else:
            print('ERROR IN VERSION CHOICE filterTheData2 %s' % (whichVersion))
            sys.exit()
    return counts
######################################################################
## alignment counts and fractions by level histogram code
def histoAlignmentFractionsByLevel(theData, theType, which, outFile):
    """Histogram sentences of version ``which`` by their alignment level.

    Sentences of the wrong type/version are skipped; a level of -1 is
    remapped to 999 so it sorts to the end.  The formatted histogram is
    written to ``outFile`` via Histogram.histoTheData.
    """
    localDict = defaultdict(int)
    for key, sent in sorted(theData.items()):
        if (0 != theType) and (theType != sent.getType()): continue
        if which != sent.getWhich(): continue
        name = sent.getName()  # NOTE(review): 'name' is unused below
        level = int(sent.getAlignmentLevel())
        if -1 == level: level = 999
        localDict[level] += 1
    # for level, freqs in sorted(localDict.items()):
    #     print('%5d %5d' % (level, freqs))
    # NOTE(review): header text says 'fraction aligned' but the keys above
    # are alignment levels -- string kept unchanged; confirm wording.
    headerInfo = '\nInput from file: %s\n' % (inFileName)
    headerInfo += 'Column 1: fraction aligned in '+ which + '\n'
    headerInfo += 'Column 2: percent of total\n'
    headerInfo += 'Column 3: raw numbers of total\n'
    headerInfo += 'Column 4: the histogram\n'
    label = 'ALIGNMENTS FOR TYPE %d ' % (theType)
    print('%s %s' % (label, which))
    label += headerInfo
    if len(localDict) > 0:
        histo, shortVersion = Histogram.histoTheData(label, localDict, 1, 1, outFile)
        # print('%s\n' % (histo))
        outFile.write('%s\n' % (histo))
        # shortStuff.append([type, shortVersion])
    else:
        # print('NO PAPERS FOR %s\n' % (label))
        outFile.write('NO PAPERS FOR %s\n' % (label))
######################################################################
## alignment levels histogram code
def histoAlignmentLevels(theData, theType, which, outFile):
    """Histogram papers by the highest alignment level any sentence reached.

    First pass records, per paper name, the maximum alignment level seen;
    second pass counts papers per maximum level and writes the histogram.
    """
    localDict = defaultdict(int)
    for key, sent in sorted(theData.items()):
        if (0 != theType) and (theType != sent.getType()): continue
        if which != sent.getWhich(): continue
        name = sent.getName()
        level = int(sent.getAlignmentLevel())
        if level > localDict[name]:
            localDict[name] = level
    localDict2 = defaultdict(int)
    for name, maxValue in sorted(localDict.items()):
        # print('%6s %5d' % (name, maxValue))
        localDict2[maxValue] += 1
    # for levels, freqs in sorted(localDict2.items()):
    #     print('%5d %5d' % (levels, freqs))
    headerInfo = '\nInput from file: %s\n' % (inFileName)
    headerInfo += 'Column 1: last alignment with changes\n'
    headerInfo += 'Column 2: percent of total\n'
    headerInfo += 'Column 3: raw numbers of total\n'
    headerInfo += 'Column 4: the histogram\n'
    label = 'LAST ALIGNMENT FOR TYPE %d ' % (theType)
    print('%s %s' % (label, which))
    label += headerInfo
    if len(localDict2) > 0:
        histo, shortVersion = Histogram.histoTheData(label, localDict2, 1, 1, outFile)
        # print('%s\n' % (histo))
        outFile.write('%s\n' % (histo))
        # shortStuff.append([type, shortVersion])
    else:
        # print('NO ALIGNMENTS FOR %s\n' % (label))
        outFile.write('NO ALIGNMENTS FOR %s\n' % (label))
######################################################################
## deletions by paragraph histogram code
def histoDeletionsByPara(theData, theType, which, outFile):
    """Histogram unaligned (deleted) sentences by draft paragraph number."""
    localDict = defaultdict(int)
    for key, sent in sorted(theData.items()):
        if (0 != theType) and (theType != sent.getType()): continue
        if which != sent.getWhich(): continue
        if sent.isAligned(): continue  # only unaligned sentences count as deletions
        paraNum = sent.getLeftParaSub()
        localDict[paraNum] += 1
    # for dist, freq in sorted(localDict.items()):
    #     print('%5d %5d' % (dist, freq))
    headerInfo = '\nInput from file: %s\n' % (inFileName)
    headerInfo += 'Column 1: paragraph numbers\n'
    headerInfo += 'Column 2: percent of total\n'
    headerInfo += 'Column 3: raw numbers of total\n'
    headerInfo += 'Column 4: the histogram\n'
    label = 'DELETIONS BY PARAGRAPH FROM DRAFT FOR TYPE %d ' % (theType)
    print('%s %s' % (label, which))
    label += headerInfo
    if len(localDict) > 0:
        histo, shortVersion = Histogram.histoTheData(label, localDict, 1, 1, outFile)
        # print('%s\n' % (histo))
        outFile.write('%s\n' % (histo))
        # shortStuff.append([type, shortVersion])
    else:
        # print('NO DELETIONS FOR %s\n' % (label))
        outFile.write('NO DELETIONS FOR %s\n' % (label))
######################################################################
## edit distance histogram code
def histoEditDistance(theData, theType, which, outFile):
    """Histogram edit-distance fractions (as percentages) for one version.

    Negative fractions (unaligned sentences) are remapped to 999.  When
    called with theType == 0 a second pass builds a per-type multiset and
    saves a combined matplotlib histogram image.
    """
    localDict = defaultdict(int)
    for key, sent in sorted(theData.items()):
        if (0 != theType) and (theType != sent.getType()): continue
        if which != sent.getWhich(): continue
        distRounded = int(round(sent.getEditDistFracOfWorst() * 100.0))
        if distRounded < 0: distRounded = 999
        localDict[distRounded] += 1
    # for dist, freq in sorted(localDict.items()):
    #     print('%5d %5d' % (dist, freq))
    headerInfo = '\nInput from file: %s\n' % (inFileName)
    headerInfo += 'Column 1: % change in aligned sentences from draft to final\n'
    headerInfo += " 'UNALIGN' means deletions from draft, insertions into final\n"
    headerInfo += 'Column 2: % of sentences with that change\n'
    headerInfo += 'Column 3: raw numbers of sentences with that change\n'
    headerInfo += 'Column 4: the histogram\n'
    label = 'EDIT DISTANCE COMPARISONS FOR ' + which + ' OF TYPE %d ' % (theType)
    print('%s %s' % (label, which))
    label += headerInfo
    if len(localDict) > 0:
        histo, shortVersion = Histogram.histoTheData(label, localDict, 4, 4, outFile)
        # print('%s\n' % (histo))
        outFile.write('%s\n' % (histo))
        # shortStuff.append([type, shortVersion])
    else:
        # print('NO EDIT DIST FRACS FOR %s\n' % (label))
        outFile.write('NO EDIT DIST FRACS FOR %s\n' % (label))
    # the combined per-type plot is produced only for the wildcard type
    if 0 != theType: return
    localTypeListDict = defaultdict(list)
    for key in range(0, 5):
        localTypeListDict[key] = [0]
    for key, sent in sorted(theData.items()):
        if which != sent.getWhich(): continue
        thisType = sent.getType()
        distRounded = int(round(sent.getEditDistFracOfWorst() * 100.0))
        if distRounded < 0: distRounded = -5
        # do the actual type
        thisList = localTypeListDict[thisType]
        thisList.append(distRounded)
        localTypeListDict[thisType] = thisList
        # do the "all" type, which is type 0
        thisList = localTypeListDict[0]
        thisList.append(distRounded)
        localTypeListDict[0] = thisList
    multiset = []
    for key, value in sorted(localTypeListDict.items()):
        multiset.append(value)
        # print('TYPE %d' % (key))
        # print('type %d %s' % (key, value))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    numBins = 50
    numBins = 25  # NOTE(review): overrides the 50 above; dead assignment
    ax.hist(multiset, numBins, color=['green','red','blue','lime','orange'], \
            label = ['0', '1', '2', '3', '4'], alpha=0.8)
    ax.legend(prop={'size': 10})
    ax.set_title('Edit Distance Histograms ' + which)
    # plt.show()
    plt.savefig('EditDistHistograms' + which)
######################################################################
## insertions by edit dist frac of previous
def histoInsertionsByEditDistFrac(theData, theType, which, outFile):
    """Histogram unaligned (inserted) sentences by the edit-distance
    fraction of the preceding sentence.

    A negative previous distance is remapped to 899; insertions at the
    very start of FINAL papers are counted separately and reported in the
    header text.
    """
    paperSet = set()
    paperCount = 0
    beginningInsertionCount = 0
    localDict = defaultdict(int)
    for key, sent in sorted(theData.items()):
        if (0 != theType) and (theType != sent.getType()): continue
        if which != sent.getWhich(): continue
        keySplit = key.split()
        paperSet.add(keySplit[0])  # first key token identifies the paper
        if sent.isAligned(): continue
        prevDist = sent.getPreviousDistance()
        if prevDist < 0:
            prevDist = 899
        if 'FINAL' == which:
            if 0 == sent.getRightParaSub() and 0 == sent.getRightSentSub():
                beginningInsertionCount += 1
                # print('INITIAL INSERTION %s %s' % (key, sent))
        localDict[prevDist] += 1
    # for dist, freq in sorted(localDict.items()):
    #     print('%5d %5d' % (dist, freq))
    paperCount = len(paperSet)
    # print('PAPER COUNT %3s' % (paperCount))
    headerInfo = '\nInput from file: %s\n' % (inFileName)
    headerInfo += 'There were initial insertions in %3d of %3d papers\n' % \
                  (beginningInsertionCount, paperCount)
    headerInfo += 'Column 1: edit dist fracs before insertion\n'
    # headerInfo += " the '899' means insertion at beginning of paper\n"
    headerInfo += 'Column 2: percent of total\n'
    headerInfo += 'Column 3: raw numbers of total\n'
    headerInfo += 'Column 4: the histogram\n'
    label = 'INSERTIONS BY EDIT DIST FRAC OF PREVIOUS SENTENCE FOR TYPE %d ' % (theType)
    print('%s %s' % (label, which))
    label += headerInfo
    if len(localDict) > 0:
        histo, shortVersion = Histogram.histoTheData(label, localDict, 4, 4, outFile)
        # print('%s\n' % (histo))
        outFile.write('%s\n' % (histo))
        # shortStuff.append([type, shortVersion])
    else:
        # print('NO INSERTIONS FOR %s\n' % (label))
        outFile.write('NO INSERTIONS FOR %s\n' % (label))
######################################################################
## insertions by paragraph histogram code
def histoInsertionsByPara(theData, theType, which, outFile):
    """Histogram unaligned (inserted) sentences by final paragraph number."""
    localDict = defaultdict(int)
    for key, sent in sorted(theData.items()):
        if (0 != theType) and (theType != sent.getType()): continue
        if which != sent.getWhich(): continue
        if sent.isAligned(): continue  # only unaligned sentences count as insertions
        paraNum = sent.getRightParaSub()
        localDict[paraNum] += 1
    # for dist, freq in sorted(localDict.items()):
    #     print('%5d %5d' % (dist, freq))
    headerInfo = '\nInput from file: %s\n' % (inFileName)
    headerInfo += 'Column 1: paragraph numbers\n'
    headerInfo += 'Column 2: percent of total\n'
    headerInfo += 'Column 3: raw numbers of total\n'
    headerInfo += 'Column 4: the histogram\n'
    label = 'INSERTIONS BY PARAGRAPH INTO FINAL FOR TYPE %d ' % (theType)
    print('%s %s' % (label, which))
    label += headerInfo
    if len(localDict) > 0:
        histo, shortVersion = Histogram.histoTheData(label, localDict, 1, 1, outFile)
        # print('%s\n' % (histo))
        outFile.write('%s\n' % (histo))
        # shortStuff.append([type, shortVersion])
    else:
        # print('NO INSERTIONS FOR %s\n' % (label))
        outFile.write('NO INSERTIONS FOR %s\n' % (label))
######################################################################
## paragraph count histogram code
def histoParagraphCounts(theData, theType, which, outFile):
    """Histogram papers by the highest paragraph index seen in ``which``.

    First pass records, per paper name, the maximum paragraph subscript
    (left side for DRAFT, right side otherwise); second pass counts papers
    per maximum and writes the histogram.
    """
    localDict = defaultdict(int)
    for key, sent in sorted(theData.items()):
        if (0 != theType) and (theType != sent.getType()): continue
        if which != sent.getWhich(): continue
        name = sent.getName()
        if 'DRAFT' == which:
            paraNum = int(sent.getLeftParaSub())
        else:
            paraNum = int(sent.getRightParaSub())
        if paraNum > localDict[name]:
            localDict[name] = paraNum
    localDict2 = defaultdict(int)
    for name, maxValue in sorted(localDict.items()):
        # print('%6s %5d' % (name, maxValue))
        localDict2[maxValue] += 1
    # for numParas, freq in sorted(localDict2.items()):
    #     print('%5d %5d' % (numParas, freq))
    headerInfo = '\nInput from file: %s\n' % (inFileName)
    headerInfo += 'Column 1: # of paragraphs ' + which + '\n'
    headerInfo += 'Column 2: percent of total\n'
    headerInfo += 'Column 3: raw numbers of total\n'
    headerInfo += 'Column 4: the histogram\n'
    label = 'PARAGRAPH COUNTS FOR TYPE %d ' % (theType)
    print('%s %s' % (label, which))
    label += headerInfo
    if len(localDict2) > 0:
        histo, shortVersion = Histogram.histoTheData(label, localDict2, 1, 1, outFile)
        # print('%s\n' % (histo))
        outFile.write('%s\n' % (histo))
        # shortStuff.append([type, shortVersion])
    else:
        # print('NO PARAGRAPHS FOR %s\n' % (label))
        outFile.write('NO PARAGRAPHS FOR %s\n' % (label))
######################################################################
## sentence count histogram code
def histoSentenceCounts(theData, theType, which, outFile):
    """Histogram, across documents, of the number of sentences per document.

    theData : dict of sort-key -> sentence record
    theType : paper type to select (0 means all types)
    which   : 'DRAFT' or 'FINAL' -- which side of the alignment to count
    outFile : open handle that receives the formatted histogram

    Bug fix: the column-1 header previously read '# of paragraphs'
    (copied from histoParagraphCounts); this function counts sentences.
    """
    localDict = defaultdict(int)
    for key, sent in sorted(theData.items()):
        if (0 != theType) and (theType != sent.getType()): continue
        if which != sent.getWhich(): continue
        name = sent.getName()
        # the sentence subscript on the matching side indexes the sentence
        if 'DRAFT' == which:
            sentNum = int(sent.getLeftSentSub())
        else:
            sentNum = int(sent.getRightSentSub())
        # keep the running maximum: the largest subscript == sentence count
        if sentNum > localDict[name]:
            localDict[name] = sentNum
    # invert: how many documents have each sentence count
    localDict2 = defaultdict(int)
    for name, maxValue in sorted(localDict.items()):
        localDict2[maxValue] += 1
    headerInfo = '\nInput from file: %s\n' % (inFileName)
    # fixed label: this is a sentence-count histogram, not paragraphs
    headerInfo += 'Column 1: # of sentences ' + which + '\n'
    headerInfo += 'Column 2: percent of total\n'
    headerInfo += 'Column 3: raw numbers of total\n'
    headerInfo += 'Column 4: the histogram\n'
    label = 'SENTENCE COUNTS FOR TYPE %d ' % (theType)
    print('%s %s' % (label, which))
    label += headerInfo
    if len(localDict2) > 0:
        histo, shortVersion = Histogram.histoTheData(label, localDict2, 2, 2, outFile)
        outFile.write('%s\n' % (histo))
    else:
        outFile.write('NO SENTENCES FOR %s\n' % (label))
#######################################################################
### sentence length count histogram code
#def histoSentenceLengthCounts(theData, theType, which):
# localDict = defaultdict(int)
# for key, sent in sorted(theData.items()):
# if (0 != theType) and (theType != sent.getType()): continue
# if which != sent.getWhich(): continue
# name = sent.getName()
# if 'DRAFT' == which:
# sentLen = int(sent.getLength())
# else:
# sentLen = int(sent.getLength())
#
# if sentLen > localDict[name]:
# localDict[name] = sentLen
#
# localDict2 = defaultdict(int)
# for name, maxValue in sorted(localDict.items()):
# print('%6s %5d' % (name, maxValue))
# localDict2[maxValue] += 1
#
# for numSents, freq in sorted(localDict2.items()):
# print('%5d %5d' % (numSents, freq))
#
# headerInfo = '\nInput from file: %s\n' % (inFileName)
# headerInfo += 'Column 1: lengths of sentences (draft)\n'
# headerInfo += 'Column 2: percent of total\n'
# headerInfo += 'Column 3: raw numbers of total\n'
# headerInfo += 'Column 4: the histogram\n'
# label = 'SENTENCE LENGTHS FOR TYPE %d ' % (theType)
# label += headerInfo
#
# if len(localDict2) > 0:
# histo, shortVersion = Histogram.histoTheData(label, localDict2, 1, 1, fout)
# print('%s\n' % (histo))
# fout.write('%s\n' % (histo))
## shortStuff.append([type, shortVersion])
# else:
# print('NO SENTENCE LENGTHS FOR %s\n' % (label))
# fout.write('NO SENTENCE LENGTHS FOR %s\n' % (label))
######################################################################
## word count histogram code
def histoWordCounts(theData, theType, which, outFile):
    """Histogram of total word count per document for one side.

    theData : dict of sort-key -> sentence record
    theType : paper type to select (0 selects every type)
    which   : 'DRAFT' or 'FINAL' -- the side of the alignment to measure
    outFile : open file handle the formatted histogram is written to
    """
    # Sum per-sentence word counts into a per-document total.
    wordsPerDoc = defaultdict(int)
    for _key, sent in sorted(theData.items()):
        if theType != 0 and sent.getType() != theType:
            continue
        if sent.getWhich() != which:
            continue
        wordsPerDoc[sent.getName()] += sent.getSentenceLength()
    # Invert: frequency of each total word count across documents.
    countFreq = defaultdict(int)
    for _docName, wordCount in sorted(wordsPerDoc.items()):
        countFreq[wordCount] += 1
    headerInfo = '\nInput from file: %s\n' % (inFileName)
    headerInfo += 'Column 1: # of words ' + which + '\n'
    headerInfo += 'Column 2: percent of total\n'
    headerInfo += 'Column 3: raw numbers of total\n'
    headerInfo += 'Column 4: the histogram\n'
    label = 'WORD COUNTS FOR TYPE %d ' % (theType)
    print('%s %s' % (label, which))
    label += headerInfo
    if len(countFreq) > 0:
        # bin width 50 words, unlike the finer-grained count histograms
        histo, shortVersion = Histogram.histoTheData(label, countFreq, 50, 50, outFile)
        outFile.write('%s\n' % (histo))
    else:
        outFile.write('NO SENTENCES FOR %s\n' % (label))
######################################################################
## parse the data in a line
def parseTheLine(theLineSplit):
    """Turn one whitespace-split input line into a normalized record.

    theLineSplit[0] is a lead token like '123_4:' whose first three
    characters are the paper number and whose last character is the
    paper type; theLineSplit[1] is the record label.  Returns
    [label, paperNumber, paperType, <selected data fields>] for known
    labels, or [] for an unrecognized label.
    """
    lead = theLineSplit[0].replace(':', '')
    label = theLineSplit[1]
    paperNumber = lead[0:3]
    paperType = lead[-1]
    # (required substrings, data-field indices) in the original test
    # order; a later matching entry overrides an earlier one, exactly
    # like the original chain of independent if-statements.
    fieldSpecs = [
        (('DELETIONS',), (3, 4)),
        (('EDITDISTS',), (3, 4)),
        (('INSERTBYDIST',), (3, 4)),
        (('INSERTIONS',), (3, 4)),
        (('SIMILARITY',), (2, 3, 4)),
        (('PARACOUNTS',), (2, 3)),
        (('SENTENCECOUNTS',), (2, 3)),
        (('SENTENCELENGTH',), (2,)),
        (('ALIGN', 'COUNT'), (2, 4, 5, 6)),
        (('EDITDISTFRACTION',), (4, 5, 6, 7)),
        (('SENTENCESBAGS',), (4, 5, 6, 7)),
    ]
    newLine = []
    for tags, indices in fieldSpecs:
        if all(tag in label for tag in tags):
            newLine = [label, paperNumber, paperType] + \
                      [theLineSplit[i] for i in indices]
    return newLine
######################################################################
##
def printDict(label, theDict):
    """Echo *label* and the sorted (key, value) pairs of *theDict*
    to both stdout and the global output file ``fout``."""
    print('\n%s' % (label))
    fout.write('\n%s\n' % (label))
    for k in sorted(theDict):
        print('%5d %5d' % (k, theDict[k]))
        fout.write('%5d %5d\n' % (k, theDict[k]))
    print('')
    fout.write('\n')
######################################################################
## read the data and return dictionaries of (count, freq) pairs
## including the counts for which the freq is zero
def readTheData(inFileName):
    """Read alignment records and return a dict of sort-key -> HistoSentence.

    The key is built so that both DRAFT and FINAL records sort in the
    order of their own sentence subscripts: the two subscript fields
    (columns 7 and 12 of the input line) swap positions depending on
    the side.  Exits the program if a line is neither DRAFT nor FINAL.

    Fix: the input file is now opened with a context manager so the
    handle is always closed (the original leaked it).
    """
    theData = defaultdict()
    with open(inFileName) as dataFile:
        for line in dataFile:
            sent = HistoSentence(line)
            lineSplit = line.split()
            # ok, here's a hiccup:
            # we want both draft and final to sort in order of their
            # own sentence subs, so the sub fields are swapped per side
            if 'DRAFT' == lineSplit[2]:
                key = '%6s %5s %4s %4s' % (lineSplit[1], lineSplit[2],
                                           lineSplit[7], lineSplit[12])
            elif 'FINAL' == lineSplit[2]:
                key = '%6s %5s %4s %4s' % (lineSplit[1], lineSplit[2],
                                           lineSplit[12], lineSplit[7])
            else:
                print('ERROR: sentence is neither DRAFT nor FINAL %s' % (sent))
                sys.exit()
            sent.checkInternalCorrectness()
            theData[key] = sent
    return theData
######################################################################
## do the scatter plots of edit distance fractions against bags
def scatterplot(theData, whichEvent, fileName):
    """Scatter-plot draft/final values for the rows matching *whichEvent*.

    Saves three images: <fileName>Draft, <fileName>Final, and
    <fileName>BagsDraftAgainstFinal (draft bag vs. final bag).
    """
    xs = []
    draftYs = []
    finalYs = []
    for row in theData:
        if whichEvent not in row[0]:
            continue
        xs.append(row[3])
        draftYs.append(row[4])
        finalYs.append(row[5])
    # draft and final against the shared x values, clearing between plots
    for ys, tag, suffix in ((draftYs, '(draft)', 'Draft'),
                            (finalYs, '(final)', 'Final')):
        plt.scatter(xs, ys)
        plt.xlabel('Plot ' + tag + ' ' + whichEvent)
        plt.savefig(fileName + suffix)
        plt.clf()
        plt.cla()
    # finally, draft bag against final bag
    plt.scatter(draftYs, finalYs)
    plt.xlabel('Plot draft bag against final bag ' + whichEvent)
    plt.savefig(fileName + 'BagsDraftAgainstFinal')
######################################################################
## do the scatter plots of edit distance fractions against bags
def scatterplotB(theData, whichEvent, fileName):
    """Scatter-plot columns 5 and 6 against column 3 for rows matching
    *whichEvent*, saving <fileName>Draft and <fileName>Final.

    Fix: clear the figure between (and after) the two plots.  Previously
    the second scatter was drawn on top of the first, so the 'Final'
    image also contained the draft points -- inconsistent with the
    sibling scatterplot(), which clears between plots.
    """
    xCoords = []
    yCoords5 = []
    yCoords6 = []
    for line in theData:
        if whichEvent not in line[0]: continue
        xCoords.append(line[3])
        yCoords5.append(line[5])
        yCoords6.append(line[6])
    plt.scatter(xCoords, yCoords5)
    plt.xlabel('Plot (draft) ' + whichEvent)
    plt.savefig(fileName + 'Draft')
    # start the second plot from an empty figure
    plt.clf()
    plt.cla()
    plt.scatter(xCoords, yCoords6)
    plt.xlabel('Plot (final) ' + whichEvent)
    plt.savefig(fileName + 'Final')
    # leave matplotlib state clean for the caller
    plt.clf()
    plt.cla()
######################################################################
## MAIN PROGRAM STARTS HERE
##
# Parse command-line arguments, open the output stream, read and sanity
# check the alignment data.
checkArgs(3, "usage: a.out inFileName outFileName")
TAG = 'FYEHISTO:'
inFileName = sys.argv[1]
outFileName = sys.argv[2]
# the literal name 'stdout' routes all output to the terminal
if 'stdout' == outFileName:
    fout = sys.stdout
else:
    fout = open(outFileName, 'w')
print("%s INFILE='%s' OUTFILE='%s'" % (TAG, inFileName, outFileName))
fout.write("%s INFILE='%s' OUTFILE='%s'\n" % (TAG, inFileName, outFileName))
# first we have to read the data
theData = readTheData(inFileName)
# and we check that what we have read makes sense: every aligned sentence
# (with a real partner subscript) must have a partner record on the
# opposite side whose key carries the subscripts in swapped order
for key, value in sorted(theData.items()):
    # print('%22s %s' % (key, value))
    thisSent = value
    if thisSent.isAligned():
        keySplit = key.split()
        if keySplit[3] != ALIGNMENTDUMMYSUB:
            if 'DRAFT' == keySplit[1]:
                keyLabel = 'FINAL'
            else:
                keyLabel = 'DRAFT'
            alignedSentKey = '%6s %5s %4s %4s' % (keySplit[0], keyLabel, \
                                                  keySplit[3], keySplit[2])
            alignedSent = theData[alignedSentKey]
            thisSent.checkAlignedSentences(alignedSent)
# create the local data dict, and while we are at it, find
# the last para number for each document (keyed 'docname SIDE')
lastParaNumDict = defaultdict(int)
for key, value in sorted(theData.items()):
    keysplit = key.split()
    if 'DRAFT' == keysplit[1]:
        lastParaNumDict[key.split()[0]+' '+key.split()[1]] = value.getLeftParaSub()
    elif 'FINAL' == keysplit[1]:
        lastParaNumDict[key.split()[0]+' '+key.split()[1]] = value.getRightParaSub()
#fout.write('LAST PARA NUM SIZE %4d\n' % (len(lastParaNumDict)))
#for key, value in lastParaNumDict.items():
#    fout.write('LAST PARA NUM %s %4d\n' % (key, value))
######################################################################
##
# Papers listed here are excluded from every tally below; the commented
# alternatives may be re-enabled to drop known rewrites.
exceptionlist = []
## list of papers that are complete rewrites
#exceptionlist = ['056_1', '085_4', '114_1', '118_2', '134_1', \
#                 '306_1', '327_2', '344_3', '358_3', '613_1', '639_1']
## list of papers for which para zero is a complete rewrite
#exceptionlist = ['003_1', '013_1', '017_1', '018_2', '019_3', \
#                 '032_1', '033_2', '034_3', '038_4', '039_1', \
#                 '050_1', '051_1', '056_1', '064_2', '065_2', \
#                 '066_2', '071_2', '073_2', '079_2', '081_3', \
#                 '085_4', '087_4', '088_4', '091_4', '114_1', \
#                 '118_2', '123_1', '134_1', '135_2', '138_1', \
#                 '152_2', '155_3', '168_1', '303_1', '306_1', \
#                 '307_1', '309_1', '317_1', '318_1', '326_2', \
#                 '327_2', '344_3', '348_3', '350_3', '353_3', \
#                 '358_3', '361_4', '371_4', '423_3', '507_1', \
#                 '510_1', '516_1', '519_1', '520_1', '533_2', \
#                 '538_2', '539_2', '551_3', '553_3', '568_4', \
#                 '605_1', '610_1', '613_1', '619_1', '622_1', \
#                 '637_1', '639_1', '678_1', '680_1']
# first we are going to track the destination para of the sentences
# from the draft
filters = ['ALL', 'FIRST', 'MIDDLE', 'LAST']
for theFilter in filters:
    # print('XX%sXX' % (theFilter))
    paradict = defaultdict(int)
    for key, value in sorted(theData.items()):
        # print('XX%sXX YY%sYY' % (key, value))
        keysplit = key.split()
        docname = keysplit[0]
        # only DRAFT records contribute to this pass
        if 'FINAL' == keysplit[1]: continue
        if keysplit[0] in exceptionlist: continue
        lastParaKey = docname + ' ' + 'DRAFT'
        lastParaNumDraft = lastParaNumDict[lastParaKey]
        lastParaKey = docname + ' ' + 'FINAL'
        lastParaNumFinal = lastParaNumDict[lastParaKey]
        leftParaSub = value.getLeftParaSub()
        rightParaSub = value.getRightParaSub()
        if ('ALL' == theFilter):
            paradict[rightParaSub] += 1
        if ('FIRST' == theFilter):
            if (0 != leftParaSub):
                continue
            else:
                paradict[rightParaSub] += 1
        if ('LAST' == theFilter):
            if (lastParaNumDraft != leftParaSub):
                continue
            else:
                # bucket the destination: 0 = first para, 2 = last para,
                # 1 = other aligned para; negative subs (deletions/
                # insertions) fall through and keep their own bucket
                if 0 == lastParaNumFinal:
                    paradict[0] += 1
                elif rightParaSub == lastParaNumFinal:
                    paradict[2] += 1
                elif rightParaSub > 0:
                    paradict[1] += 1
                else:
                    paradict[rightParaSub] += 1
        if ('MIDDLE' == theFilter):
            if (0 == leftParaSub) or (lastParaNumDraft == leftParaSub):
                continue
            else:
                if 0 == lastParaNumFinal:
                    paradict[0] += 1
                elif rightParaSub == lastParaNumFinal:
                    paradict[2] += 1
                elif rightParaSub > 0:
                    paradict[1] += 1
                else:
                    paradict[rightParaSub] += 1
        # fout.write('XX%sXX YY%sYY\n' % (key, value))
    # report this filter's distribution with percentages
    fout.write('\nDEST OF DRAFT for %s\n' % (theFilter))
    totalcount = 0
    for parasub, count in sorted(paradict.items()):
        totalcount += count
    for parasub, count in sorted(paradict.items()):
        fout.write('DEST OF DRAFT sub, count %4d %5d %8.2f\n' % \
                   (parasub, count, float(100.0*count)/float(totalcount)))
    fout.write('DEST OF DRAFT TOTAL %8d\n' % (totalcount))
# then we are going to track the source para of the sentences
# from the final (mirror image of the DEST OF DRAFT pass above)
filters = ['FIRST', 'MIDDLE', 'LAST']
for theFilter in filters:
    # print('XX%sXX' % (theFilter))
    paradict = defaultdict(int)
    for key, value in sorted(theData.items()):
        # print('XX%sXX YY%sYY' % (key, value))
        keysplit = key.split()
        docname = keysplit[0]
        # only FINAL records contribute to this pass
        if 'DRAFT' == keysplit[1]: continue
        if keysplit[0] in exceptionlist: continue
        lastParaKey = docname + ' ' + 'DRAFT'
        lastParaNumDraft = lastParaNumDict[lastParaKey]
        lastParaKey = docname + ' ' + 'FINAL'
        lastParaNumFinal = lastParaNumDict[lastParaKey]
        leftParaSub = value.getLeftParaSub()
        rightParaSub = value.getRightParaSub()
        # NOTE(review): 'ALL' is not in this pass's filters list, so the
        # branch below never fires here; kept for symmetry with the
        # DEST OF DRAFT pass.
        if ('ALL' == theFilter):
            paradict[leftParaSub] += 1
        if ('FIRST' == theFilter):
            if (0 != rightParaSub):
                continue
            else:
                paradict[leftParaSub] += 1
        if ('LAST' == theFilter):
            if (lastParaNumFinal != rightParaSub):
                continue
            else:
                # bucket the source: 0 = first para, 2 = last para,
                # 1 = other aligned para; negative subs keep their bucket
                if 0 == lastParaNumDraft:
                    paradict[0] += 1
                elif leftParaSub == lastParaNumDraft:
                    paradict[2] += 1
                elif leftParaSub > 0:
                    paradict[1] += 1
                else:
                    paradict[leftParaSub] += 1
        if ('MIDDLE' == theFilter):
            if (0 == rightParaSub) or (lastParaNumFinal == rightParaSub):
                continue
            else:
                if 0 == lastParaNumDraft:
                    paradict[0] += 1
                elif leftParaSub == lastParaNumDraft:
                    paradict[2] += 1
                elif leftParaSub > 0:
                    paradict[1] += 1
                else:
                    paradict[leftParaSub] += 1
        # fout.write('XX%sXX YY%sYY\n' % (key, value))
    fout.write('\nSOURCE OF FINAL for %s\n' % (theFilter))
    totalcount = 0
    for parasub, count in sorted(paradict.items()):
        totalcount += count
    for parasub, count in sorted(paradict.items()):
        fout.write('SOURCE OF FINAL sub, count %4d %5d %8.2f\n' % \
                   (parasub, count, float(100.0*count)/float(totalcount)))
    fout.write('SOURCE OF FINAL TOTAL %8d\n\n' % (totalcount))
# now figure out how many para zero insertions are at the top of the para
# and how many are at the bottom of the para
#
# first the top: scan FINAL para-zero sentences in forward key order and
# count the run of insertions (left sub < 0) before the first aligned one
# in each document
oldkey = ''
noalignmentsyet = True
firstparasentencecount = 0
initialinsertions = 0
firstParaInsertions = 0
for key, value in sorted(theData.items()):
    keysplit = key.split()
    if 'DRAFT' == keysplit[1]: continue
    if keysplit[0] in exceptionlist: continue
    if value.getRightParaSub() > 0: continue
    if value.getLeftParaSub() < 0: firstParaInsertions += 1
    firstparasentencecount += 1
    # fout.write('XX%sXX YY%sYY\n' % (key, value))
    # a new document name resets the leading-run detector
    if keysplit[0] != oldkey:
        oldkey = keysplit[0]
        noalignmentsyet = True
        # fout.write('RESET, NEW DOCUMENT XX%sXX\n' % (key))
    if noalignmentsyet:
        if value.getLeftParaSub() < 0:
            # fout.write('INIT INSERTION %s\n' % (value))
            initialinsertions += 1
        else:
            noalignmentsyet = False
# then the bottom: the same scan in reverse order finds trailing insertions
oldkey = ''
noalignmentsyet = True
firstparasentencecount = 0
trailinginsertions = 0
firstParaInsertions = 0
for key, value in reversed(sorted(theData.items())):
    keysplit = key.split()
    if 'DRAFT' == keysplit[1]: continue
    if keysplit[0] in exceptionlist: continue
    if value.getRightParaSub() > 0: continue
    if value.getLeftParaSub() < 0: firstParaInsertions += 1
    firstparasentencecount += 1
    # fout.write('XX%sXX YY%sYY\n' % (key, value))
    if keysplit[0] != oldkey:
        oldkey = keysplit[0]
        noalignmentsyet = True
        # fout.write('RESET, NEW DOCUMENT XX%sXX\n' % (key))
    if noalignmentsyet:
        if value.getLeftParaSub() < 0:
            # fout.write('TRAILING INSERTION %s\n' % (value))
            trailinginsertions += 1
        else:
            noalignmentsyet = False
fout.write('FIRST PARA SENTENCE COUNT %8d\n' % (firstparasentencecount))
fout.write('FIRSTPARA INSERTION COUNT %8d\n' % (firstParaInsertions))
fout.write('INITIAL INSERTION COUNT %8d\n' % (initialinsertions))
fout.write('TRAILING INSERTION COUNT %8d\n' % (trailinginsertions))
# now we compute the number of papers that are total rewrites in the first
# paragraph: per document, count FINAL para-zero sentences and how many of
# them are insertions (left sub < 0)
sentencecount = defaultdict(int)
insertedsentencecount = defaultdict(int)
for key, value in sorted(theData.items()):
    keysplit = key.split()
    if 'DRAFT' == keysplit[1]: continue
    if keysplit[0] in exceptionlist: continue
    if value.getRightParaSub() > 0: continue
    sentencecount[keysplit[0]] += 1
    if value.getLeftParaSub() < 0:
        insertedsentencecount[keysplit[0]] += 1
# histogram the inserted fraction, bucketed into 5%-wide bins
fracinsertedhisto = defaultdict(int)
for key, value in sorted(sentencecount.items()):
    fracinserted = 100.0 * float(insertedsentencecount[key]) / float(value)
    fracinserted = 5 * (int(fracinserted) // 5)
    # a 100% bucket means the whole first paragraph was rewritten
    if 100 == int(fracinserted):
        fout.write('%10s %6d\n' % (key, int(fracinserted)))
    fracinsertedhisto[int(fracinserted)] += 1
totalcountofpapers = 0
for key, value in sorted(fracinsertedhisto.items()):
    totalcountofpapers += value
# report the cumulative distribution over the 5%-bins
runningcount = 0
for key, value in sorted(fracinsertedhisto.items()):
    runningcount += value
    fraction = float(runningcount) / float(totalcountofpapers)
    fout.write('FRAC INSERTED %5d %6d %6d %8.3f\n' % \
               (key, value, runningcount, fraction))
| gpl-2.0 |
BhallaLab/moose-full | moose-examples/tutorials/ChemicalBistables/propagationBis.py | 2 | 6232 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
"""
This example illustrates propagation of state flips in a
linear 1-dimensional reaction-diffusion system. It uses a
bistable system loaded in from a kkit definition file, and
places this in a tapering cylinder for pseudo 1-dimentionsional
diffusion.
This example illustrates a number of features of reaction-diffusion
calculations.
First, it shows how to set up such systems. Key steps are to create
the compartment and define its voxelization, then create the Ksolve,
Dsolve, and Stoich. Then we assign stoich.compartment, ksolve and
dsolve in that order. Finally we assign the path of the Stoich.
For running the model, we start by introducing
a small symmetry-breaking increment of concInit
of the molecule **b** in the last compartment on the cylinder. The model
starts out with molecules at equal concentrations, so that the system would
settle to the unstable fixed point. This symmetry breaking leads
to the last compartment moving towards the state with an
increased concentration of **b**,
and this effect propagates to all other compartments.
Once the model has settled to the state where **b** is high throughout,
we simply exchange the concentrations of **b** with **c** in the left
half of the cylinder. This introduces a brief transient at the junction,
which soon settles to a smooth crossover.
Finally, as we run the simulation, the tapering geometry comes into play.
Since the left hand side has a larger diameter than the right, the
state on the left gradually wins over and the transition point slowly
moves to the right.
"""
import math
import numpy
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import moose
def makeModel():
    """Build the reaction-diffusion model (Python 2 / MOOSE).

    Loads the bistable kinetics from the kkit file M1719.g into a
    tapering cylindrical mesh, attaches Ksolve/Dsolve/Stoich solvers,
    and breaks the initial symmetry by nudging concInit of pool 'b'
    in the last voxel.
    """
    # create container for model
    r0 = 1e-6  # m; radius at x0
    r1 = 0.5e-6  # m. Note taper: the far end is narrower.
    num = 200  # number of diffusion voxels along the cylinder
    diffLength = 1e-6  # m per voxel
    comptLength = num * diffLength  # m
    diffConst = 20e-12  # m^2/sec
    concA = 1  # millimolar  (NOTE(review): unused below -- confirm)
    diffDt = 0.02  # timestep for the diffusion
    chemDt = 0.2  # timestep for the reaction
    mfile = '../../genesis/M1719.g'
    model = moose.Neutral( 'model' )
    compartment = moose.CylMesh( '/model/kinetics' )
    # load in model
    modelId = moose.loadModel( mfile, '/model', 'ee' )
    a = moose.element( '/model/kinetics/a' )
    b = moose.element( '/model/kinetics/b' )
    c = moose.element( '/model/kinetics/c' )
    ac = a.concInit
    bc = b.concInit
    cc = c.concInit
    # define the tapering cylinder geometry and its voxelization
    compartment.r0 = r0
    compartment.r1 = r1
    compartment.x0 = 0
    compartment.x1 = comptLength
    compartment.diffLength = diffLength
    assert( compartment.numDiffCompts == num )
    # Assign parameters: every pool diffuses at the same rate
    for x in moose.wildcardFind( '/model/kinetics/##[ISA=PoolBase]' ):
        #print 'pools: ', x, x.name
        x.diffConst = diffConst
    # Make solvers
    ksolve = moose.Ksolve( '/model/kinetics/ksolve' )
    dsolve = moose.Dsolve( '/model/dsolve' )
    # Set up clocks: clock 10 drives diffusion, 11-16 drive chemistry.
    moose.setClock( 10, diffDt )
    for i in range( 11, 17 ):
        moose.setClock( i, chemDt )
    # Order matters: assign compartment, ksolve, dsolve, then path.
    stoich = moose.Stoich( '/model/kinetics/stoich' )
    stoich.compartment = compartment
    stoich.ksolve = ksolve
    stoich.dsolve = dsolve
    stoich.path = "/model/kinetics/##"
    print 'dsolve.numPools, num = ', dsolve.numPools, num
    b.vec[num-1].concInit *= 1.01 # Break symmetry.
def main():
    """Run the simulation and animate concentration profiles.

    Phase 1: run for `runtime` seconds, updating the plot every
    `displayInterval` seconds.  Then swap the b and c concentrations in
    the left half of the cylinder and run for `newruntime` more seconds
    to watch the transition point drift (Python 2: print statements,
    raw_input, and integer division are used throughout).
    """
    runtime = 100
    displayInterval = 2
    makeModel()
    dsolve = moose.element( '/model/dsolve' )
    moose.reinit()
    #moose.start( runtime ) # Run the model for 10 seconds.
    a = moose.element( '/model/kinetics/a' )
    b = moose.element( '/model/kinetics/b' )
    c = moose.element( '/model/kinetics/c' )
    img = mpimg.imread( 'propBis.png' )
    #imgplot = plt.imshow( img )
    #plt.show()
    # interactive mode: top panel shows the schematic image, bottom
    # panel the live concentration profiles
    plt.ion()
    fig = plt.figure( figsize=(12,10) )
    png = fig.add_subplot(211)
    imgplot = plt.imshow( img )
    ax = fig.add_subplot(212)
    ax.set_ylim( 0, 0.001 )
    plt.ylabel( 'Conc (mM)' )
    plt.xlabel( 'Position along cylinder (microns)' )
    pos = numpy.arange( 0, a.vec.conc.size, 1 )
    line1, = ax.plot( pos, a.vec.conc, 'r-', label='a' )
    line2, = ax.plot( pos, b.vec.conc, 'g-', label='b' )
    line3, = ax.plot( pos, c.vec.conc, 'b-', label='c' )
    timeLabel = plt.text(60, 0.0009, 'time = 0')
    plt.legend()
    fig.canvas.draw()
    # Phase 1: let the symmetry-broken state propagate
    for t in range( displayInterval, runtime, displayInterval ):
        moose.start( displayInterval )
        line1.set_ydata( a.vec.conc )
        line2.set_ydata( b.vec.conc )
        line3.set_ydata( c.vec.conc )
        timeLabel.set_text( "time = %d" % t )
        fig.canvas.draw()
    print 'Swapping concs of b and c in half the cylinder'
    # Python 2 integer division gives the left-half voxel count
    for i in range( b.numData/2 ):
        temp = b.vec[i].conc
        b.vec[i].conc = c.vec[i].conc
        c.vec[i].conc = temp
    # Phase 2: watch the crossover point move toward the narrow end
    newruntime = 200
    for t in range( displayInterval, newruntime, displayInterval ):
        moose.start( displayInterval )
        line1.set_ydata( a.vec.conc )
        line2.set_ydata( b.vec.conc )
        line3.set_ydata( c.vec.conc )
        timeLabel.set_text( "time = %d" % (t + runtime) )
        fig.canvas.draw()
    print( "Hit 'enter' to exit" )
    raw_input()

# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
    main()
| gpl-2.0 |
mne-tools/mne-tools.github.io | 0.19/_downloads/f911a8ff6ce16e7e3e5057bbf8b5a690/plot_stats_cluster_time_frequency_repeated_measures_anova.py | 2 | 10044 | """
.. _tut-timefreq-twoway-anova:
====================================================================
Mass-univariate twoway repeated measures ANOVA on single trial power
====================================================================
This script shows how to conduct a mass-univariate repeated measures
ANOVA. As the model to be fitted assumes two fully crossed factors,
we will study the interplay between perceptual modality
(auditory VS visual) and the location of stimulus presentation
(left VS right). Here we use single trials as replications
(subjects) while iterating over time slices plus frequency bands
for to fit our mass-univariate model. For the sake of simplicity we
will confine this analysis to one single channel of which we know
that it exposes a strong induced response. We will then visualize
each effect by creating a corresponding mass-univariate effect
image. We conclude with accounting for multiple comparisons by
performing a permutation clustering test using the ANOVA as
clustering function. The results final will be compared to multiple
comparisons using False Discovery Rate correction.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import f_threshold_mway_rm, f_mway_rm, fdr_correction
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
# Load the MNE sample dataset, pick gradiometers, and epoch the four
# auditory/visual conditions.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5  # epoch window in seconds around each event
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443']  # mark known bad channel
# picks MEG gradiometers (EOG kept only for artifact rejection)
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332'
# Load conditions; amplitude thresholds reject artifact-laden epochs
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    picks=picks, baseline=(None, 0), preload=True,
                    reject=reject)
epochs.pick_channels([ch_name])  # restrict example to one channel
###############################################################################
# We have to make sure all conditions have the same counts, as the ANOVA
# expects a fully balanced data matrix and does not forgive imbalances that
# generously (risk of type-I error).
# The ANOVA expects a fully balanced design, so equalize trial counts
# across the four conditions first.
epochs.equalize_event_counts(event_id)
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet.
decim = 2
freqs = np.arange(7, 30, 3)  # define frequencies of interest
n_cycles = freqs / freqs[0]
zero_mean = False  # don't correct morlet wavelet to be of mean zero
# To have a true wavelet zero_mean should be True but here for illustration
# purposes it helps to spot the evoked response.

# Compute single-trial time-frequency power for each condition; keep only
# the one remaining channel's data.
epochs_power = list()
for condition in [epochs[k] for k in event_id]:
    this_tfr = tfr_morlet(condition, freqs, n_cycles=n_cycles,
                          decim=decim, average=False, zero_mean=zero_mean,
                          return_itc=False)
    this_tfr.apply_baseline(mode='ratio', baseline=(None, 0))
    this_power = this_tfr.data[:, 0, :, :]  # we only have one channel.
    epochs_power.append(this_power)
###############################################################################
# Setup repeated measures ANOVA
# -----------------------------
#
# We will tell the ANOVA how to interpret the data matrix in terms of factors.
# This is done via the factor levels argument which is a list of the number
# factor levels for each factor.
# Describe the design to the ANOVA: two fully crossed two-level factors
# (A = modality, B = location), with trials playing the role of subjects.
n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] // n_conditions
factor_levels = [2, 2]  # number of levels in each factor
effects = 'A*B'  # this is the default signature for computing all effects
# Other possible options are 'A' or 'B' for the corresponding main effects
# or 'A:B' for the interaction effect only (this notation is borrowed from the
# R formula language)
n_freqs = len(freqs)
times = 1e3 * epochs.times[::decim]  # decimated time axis in milliseconds
n_times = len(times)

# Assemble the data matrix: replications first, then conditions.
data = np.swapaxes(np.asarray(epochs_power), 1, 0)
# reshape last two dimensions in one mass-univariate observation-vector
data = data.reshape(n_replications, n_conditions, n_freqs * n_times)
# so we have replications * conditions * observations:
print(data.shape)
###############################################################################
# While the iteration scheme used above for assembling the data matrix
# makes sure the first two dimensions are organized as expected (with A =
# modality and B = location):
#
# .. table:: Sample data layout
#
# ===== ==== ==== ==== ====
# trial A1B1 A1B2 A2B1 B2B2
# ===== ==== ==== ==== ====
# 1 1.34 2.53 0.97 1.74
# ... ... ... ... ...
# 56 2.45 7.90 3.09 4.76
# ===== ==== ==== ==== ====
#
# Now we're ready to run our repeated measures ANOVA.
#
# Note. As we treat trials as subjects, the test only accounts for
# time locked responses despite the 'induced' approach.
# For analysis for induced power at the group level averaged TRFs
# are required.
# Fit the mass-univariate repeated-measures ANOVA: one F/p pair per
# (frequency, time) observation for each of the three effects.
fvals, pvals = f_mway_rm(data, factor_levels, effects=effects)

effect_labels = ['modality', 'location', 'modality by location']

# let's visualize our effects by computing f-images
for effect, sig, effect_label in zip(fvals, pvals, effect_labels):
    plt.figure()
    # show naive F-values in gray
    plt.imshow(effect.reshape(8, 211), cmap=plt.cm.gray, extent=[times[0],
               times[-1], freqs[0], freqs[-1]], aspect='auto',
               origin='lower')
    # create mask for significant Time-frequency locations and overlay
    # them in color on top of the gray background
    effect = np.ma.masked_array(effect, [sig > .05])
    plt.imshow(effect.reshape(8, 211), cmap='RdBu_r', extent=[times[0],
               times[-1], freqs[0], freqs[-1]], aspect='auto',
               origin='lower')
    plt.colorbar()
    plt.xlabel('Time (ms)')
    plt.ylabel('Frequency (Hz)')
    plt.title(r"Time-locked response for '%s' (%s)" % (effect_label, ch_name))
    plt.show()

# For the clustering step, confine the analysis to the interaction effect.
effects = 'A:B'
###############################################################################
# A stat_fun must deal with a variable number of input arguments.
# Inside the clustering function each condition will be passed as flattened
# array, necessitated by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
# The following function catches the list input and swaps the first and
# the second dimension and finally calls the ANOVA function.
def stat_fun(*args):
    """Adapter so the clustering routine can call the two-way ANOVA.

    The clustering code passes one flattened array per condition; the
    ANOVA expects subjects x conditions x observations, so the first two
    axes are swapped.  Only the F-values are returned (p-values dropped).
    """
    stacked = np.swapaxes(args, 1, 0)
    fvals = f_mway_rm(stacked, factor_levels=factor_levels,
                      effects=effects, return_pvals=False)
    return fvals[0]
# The ANOVA returns a tuple f-values and p-values, we will pick the former.
pthresh = 0.001  # set threshold rather high to save some time
f_thresh = f_threshold_mway_rm(n_replications, factor_levels, effects,
                               pthresh)
tail = 1  # f-test, so tail > 0 (F-values are non-negative)
n_permutations = 256  # Save some time (the test won't be too sensitive ...)
# Run the cluster-level permutation test using the ANOVA as stat_fun.
T_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
    epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail, n_jobs=1,
    n_permutations=n_permutations, buffer_size=None)
###############################################################################
# Create new stats image with only significant clusters:
# Mask the F-image down to the clusters that survive p < .05.
good_clusters = np.where(cluster_p_values < .05)[0]
T_obs_plot = np.ma.masked_array(T_obs,
                                np.invert(clusters[np.squeeze(good_clusters)]))

# Gray background = raw F-values; color overlay = surviving clusters.
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot], [plt.cm.gray, 'RdBu_r']):
    plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
               freqs[0], freqs[-1]], aspect='auto',
               origin='lower')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title("Time-locked response for 'modality by location' (%s)\n"
          " cluster-level corrected (p <= 0.05)" % ch_name)
plt.show()

# Now using FDR on the interaction p-values (pvals[2]) instead:
mask, _ = fdr_correction(pvals[2])
T_obs_plot2 = np.ma.masked_array(T_obs, np.invert(mask))

plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot2], [plt.cm.gray, 'RdBu_r']):
    plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
               freqs[0], freqs[-1]], aspect='auto',
               origin='lower')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title("Time-locked response for 'modality by location' (%s)\n"
          " FDR corrected (p <= 0.05)" % ch_name)
plt.show()
###############################################################################
# Both cluster level and FDR correction help get rid of
# potential spots we saw in the naive f-images.
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/feature_selection/tests/test_base.py | 98 | 3681 | import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
from sklearn.utils.testing import assert_raises, assert_equal
class StepSelector(SelectorMixin, BaseEstimator):
    """Retain every `step`-th feature, starting with feature 0."""

    def __init__(self, step=2):
        self.step = step

    def fit(self, X, y=None):
        # only the number of input columns matters for building the mask
        X = check_array(X, 'csc')
        self.n_input_feats = X.shape[1]
        return self

    def _get_support_mask(self):
        # boolean mask that is True exactly at indices 0, step, 2*step, ...
        return (np.arange(self.n_input_feats) % self.step) == 0
# ---- shared fixtures for the StepSelector tests ---------------------------
# every even-indexed feature is retained, odd-indexed ones are dropped
support = [idx % 2 == 0 for idx in range(10)]
support_inds = list(range(0, 10, 2))
X = np.arange(20).reshape(2, 10)
Xt = X[:, ::2].copy()     # expected transform() output: the kept columns
Xinv = X.copy()
Xinv[:, 1::2] = 0         # dropped columns come back as zeros on inverse
y = [0, 1]
feature_names = [chr(ord('A') + idx) for idx in range(10)]
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
    """transform() on dense input keeps the selected columns, dtype intact."""
    selector = StepSelector()
    transformed = selector.fit(X, y).transform(X)
    assert_array_equal(Xt, transformed)
    assert_array_equal(Xt, StepSelector().fit_transform(X, y))
    # the selector must not silently change the input dtype
    for dtype in (np.int32, np.float32):
        assert_equal(dtype, selector.transform(X.astype(dtype)).dtype)
    # a 1d list of strings is transformed column-wise as well
    assert_array_equal(feature_names_t,
                       selector.transform([feature_names]).ravel())
    # a column-count mismatch must raise
    assert_raises(ValueError, selector.transform, np.array([[1], [2]]))
def test_transform_sparse():
    """transform() accepts sparse input and returns the selected columns."""
    to_sparse = sp.csc_matrix
    selector = StepSelector()
    fitted = selector.fit(to_sparse(X))
    assert_array_equal(Xt, fitted.transform(to_sparse(X)).toarray())
    assert_array_equal(Xt, selector.fit_transform(to_sparse(X)).toarray())
    # the selector must not silently change the input dtype
    for dtype in (np.int32, np.float32):
        assert_equal(dtype, selector.transform(to_sparse(X).astype(dtype)).dtype)
    # a column-count mismatch must raise
    assert_raises(ValueError, selector.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
    """inverse_transform() on dense input re-inserts dropped columns as 0."""
    selector = StepSelector()
    restored = selector.fit(X, y).inverse_transform(Xt)
    assert_array_equal(Xinv, restored)
    # the selector must not silently change the input dtype
    for dtype in (np.int32, np.float32):
        assert_equal(dtype,
                     selector.inverse_transform(Xt.astype(dtype)).dtype)
    # a 1d list of strings round-trips with '' in the dropped slots
    assert_array_equal(feature_names_inv,
                       selector.inverse_transform([feature_names_t]).ravel())
    # a column-count mismatch must raise
    assert_raises(ValueError, selector.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
    """inverse_transform() accepts sparse input and restores dropped columns."""
    to_sparse = sp.csc_matrix
    selector = StepSelector()
    restored = selector.fit(to_sparse(X)).inverse_transform(to_sparse(Xt))
    assert_array_equal(Xinv, restored.toarray())
    # the selector must not silently change the input dtype
    for dtype in (np.int32, np.float32):
        assert_equal(dtype,
                     selector.inverse_transform(to_sparse(Xt).astype(dtype)).dtype)
    # a column-count mismatch must raise
    assert_raises(ValueError, selector.inverse_transform, np.array([[1], [2]]))
def test_get_support():
    """get_support() reports the kept features as a mask or as indices."""
    selector = StepSelector().fit(X, y)
    assert_array_equal(support, selector.get_support())
    assert_array_equal(support_inds, selector.get_support(indices=True))
| mit |
adelomana/schema | conditionedFitness/figureMutagenized/script.2.3.py | 2 | 2965 | import matplotlib,numpy,sys,scipy,pickle
import matplotlib.pyplot
sys.path.append('../lib')
import calculateStatistics
### MAIN

# Global plotting style for the figure produced below.
matplotlib.rcParams.update({'font.size':36,'font.family':'Times New Roman','xtick.labelsize':28,'ytick.labelsize':28})
thePointSize=12

# Output folder for the pickled trajectory (user-specific scratch space).
jarDir='/Users/adriandelomana/scratch/'

# mutagenized 2.3
# Each xSignal/xNoSignal pair holds raw counts for one generation: row 0 is
# the first measurement, row 1 the second, with 5 replicates each.
# calculateStatistics.main presumably returns (conditioned-fitness mean,
# standard deviation, p-value) -- defined in ../lib, not visible here.
xSignal=numpy.array([[205,162,175,200,150],[35,33,50,48,45]])
xNoSignal=numpy.array([[190,166,175,145,139],[47,49,33,31,36]])
cf_mu_0, cf_sd_0, pvalue_0 = calculateStatistics.main(xSignal, xNoSignal)

xSignal=numpy.array([[126,116,139,114,112],[55,53,56,54,46]])
xNoSignal=numpy.array([[115,143,135,123,142],[60,70,62,71,65]])
cf_mu_50, cf_sd_50, pvalue_50 = calculateStatistics.main(xSignal, xNoSignal)

# NOTE(review): the second row below has only 4 values while every other
# replicate set in this script has 5 -- a datum is probably missing. A ragged
# shape makes numpy build an object array (and raises on newer numpy
# versions); confirm against the original data before trusting this figure.
xSignal=numpy.array([[97,126,132,140,168],[124,105,124,114]])
xNoSignal=numpy.array([[139,130,157,132,120],[113,150,116,95,127]])
cf_mu_100, cf_sd_100, pvalue_100 = calculateStatistics.main(xSignal, xNoSignal)

xSignal=numpy.array([[161,149,143,154,140],[155,134,131,167,151]])
xNoSignal=numpy.array([[148,176,172,184,185],[141,172,160,146,140]])
cf_mu_150, cf_sd_150, pvalue_150 = calculateStatistics.main(xSignal, xNoSignal)

xSignal=numpy.array([[198,149,151,203,168],[133,143,147,139,144]])
xNoSignal=numpy.array([[193,187,195,183,171],[147,149,147,145,136]])
cf_mu_200, cf_sd_200, pvalue_200 = calculateStatistics.main(xSignal, xNoSignal)

xSignal=numpy.array([[154,177,177,176,160],[159,176,177,172,179]])
xNoSignal=numpy.array([[171,161,220,194,180],[142,170,143,150,162]])
cf_mu_250, cf_sd_250, pvalue_250 = calculateStatistics.main(xSignal, xNoSignal)

# Assemble the trajectory: x = generation, y = mean conditioned fitness,
# z = standard deviation (error bars), w = p-values (significance stars).
x = [0, 50, 100, 150, 200, 250]
y = [cf_mu_0, cf_mu_50, cf_mu_100, cf_mu_150, cf_mu_200, cf_mu_250]
z = [cf_sd_0, cf_sd_50, cf_sd_100, cf_sd_150, cf_sd_200, cf_sd_250]
w = [pvalue_0, pvalue_50, pvalue_100, pvalue_150, pvalue_200, pvalue_250]

matplotlib.pyplot.errorbar(x,y,yerr=z,fmt=':o',color='red',ecolor='red',markeredgecolor='red',capsize=0,ms=thePointSize,mew=0)

# Annotate significance: one star for 0.01 <= p < 0.05, two stars (offset
# horizontally by +/-3 generations) for p < 0.01, placed just beyond the
# error bar on the same side as the mean.
for i in range(len(w)):
    if y[i] > 0.:
        sp=y[i]+z[i]+0.02
    else:
        sp=y[i]-z[i]-0.02
    if w[i] < 0.05 and w[i] >= 0.01:
        matplotlib.pyplot.scatter(x[i], sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
    if w[i] < 0.01:
        matplotlib.pyplot.scatter(x[i]-3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
        matplotlib.pyplot.scatter(x[i]+3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')

# Zero-fitness reference line and fixed axes shared across the figure set.
matplotlib.pyplot.plot([0,300],[0,0],'--',color='black')
matplotlib.pyplot.xlim([-25,325])
matplotlib.pyplot.ylim([-0.4,0.4])
matplotlib.pyplot.yticks([-0.4,-0.2,0,0.2,0.4])
matplotlib.pyplot.xlabel('Generation')
matplotlib.pyplot.ylabel('Conditioned Fitness')
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.savefig('figure.mutagenized.2.3.pdf')
matplotlib.pyplot.clf()

# save processed data alternative plotting
trajectory=[x,y,z]
jarFile=jarDir+'mutagenized.2.3.pickle'
f=open(jarFile,'wb')
pickle.dump(trajectory,f)
f.close()
| gpl-3.0 |
kerimlcr/ab2017-dpyo | ornek/osmnx/osmnx-0.3/osmnx/save_load.py | 1 | 16292 | ###################################################################################################
# Module: save_load.py
# Description: Save and load networks to/from disk
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/gboeing/osmnx
###################################################################################################
import re
import time
import os
import ast
import numpy as np
import pandas as pd
import geopandas as gpd
import networkx as nx
from shapely.geometry import Point, LineString
from shapely import wkt
from . import globals
from .utils import log, make_str
def save_gdf_shapefile(gdf, filename=None, folder=None):
    """
    Save GeoDataFrame as an ESRI shapefile.

    Parameters
    ----------
    gdf : GeoDataFrame, the gdf to be saved
    filename : string, what to call the shapefile (file extensions are added automatically)
    folder : string, where to save the shapefile, if none, then default folder

    Returns
    -------
    None
    """
    if folder is None:
        folder = globals.data_folder

    # make sure gdf_name exists *before* it is used below: previously the
    # 'unnamed' fallback was only assigned after saving, so a gdf without a
    # gdf_name attribute raised AttributeError whenever filename was None
    if not hasattr(gdf, 'gdf_name'):
        gdf.gdf_name = 'unnamed'

    if filename is None:
        filename = make_shp_filename(gdf.gdf_name)

    # give the save folder a filename subfolder to make the full path to the files
    folder_path = '{}/{}'.format(folder, filename)

    # if the save folder does not already exist, create it with a filename subfolder
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)

    gdf.to_file(folder_path)
    log('Saved the GeoDataFrame "{}" as shapefile "{}"'.format(gdf.gdf_name, folder_path))
def save_graph_shapefile(G, filename='graph', folder=None, encoding='utf-8'):
    """
    Save graph nodes and edges as ESRI shapefiles to disk.
    Parameters
    ----------
    G : graph
    filename : string, the name of the shapefiles (not including file extensions)
    folder : string, the folder to contain the shapefiles, if None, use default data folder
    encoding : string, the character encoding for the saved shapefiles
    Returns
    -------
    None
    """
    start_time = time.time()
    if folder is None:
        folder = globals.data_folder
    # convert directed graph G to an undirected graph for saving as a shapefile
    # (get_undirected, defined below in this module, keeps opposite-direction
    # parallel edges whose geometries differ)
    G_save = G.copy()
    G_save = get_undirected(G_save)
    # create a GeoDataFrame of the nodes and set CRS
    # (nodes become columns of the constructed frame, hence the transpose)
    nodes = {node:data for node, data in G_save.nodes(data=True)}
    gdf_nodes = gpd.GeoDataFrame(nodes).T
    gdf_nodes.crs = G_save.graph['crs']
    # create a geometry column then drop the x and y columns
    gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1)
    gdf_nodes = gdf_nodes.drop(['x', 'y'], axis=1)
    # make everything but geometry column a string
    # (shapefile attribute columns cannot hold arbitrary Python objects)
    gdf_nodes['osmid'] = gdf_nodes['osmid'].astype(np.int64)
    for col in [c for c in gdf_nodes.columns if not c == 'geometry']:
        gdf_nodes[col] = gdf_nodes[col].fillna('').map(make_str)
    # create a list to hold our edges, then loop through each edge in the graph
    edges = []
    for u, v, key, data in G_save.edges(keys=True, data=True):
        # for each edge, add key and all attributes in data dict to the edge_details
        edge_details = {'key':key}
        for attr_key in data:
            edge_details[attr_key] = data[attr_key]
        # if edge doesn't already have a geometry attribute, create one now
        # as a straight line between the endpoint nodes
        # (G_save.node[...] is the networkx 1.x node-attribute API)
        if not 'geometry' in data:
            point_u = Point((G_save.node[u]['x'], G_save.node[u]['y']))
            point_v = Point((G_save.node[v]['x'], G_save.node[v]['y']))
            edge_details['geometry'] = LineString([point_u, point_v])
        edges.append(edge_details)
    # create a geodataframe from the list of edges and set the CRS
    gdf_edges = gpd.GeoDataFrame(edges)
    gdf_edges.crs = G_save.graph['crs']
    # make everything but geometry column a string
    for col in [c for c in gdf_edges.columns if not c == 'geometry']:
        gdf_edges[col] = gdf_edges[col].fillna('').map(make_str)
    # if the save folder does not already exist, create it with a filename subfolder
    folder = '{}/{}'.format(folder, filename)
    if not os.path.exists(folder):
        os.makedirs(folder)
    # save the nodes and edges as separate ESRI shapefiles
    gdf_nodes.to_file('{}/nodes'.format(folder), encoding=encoding)
    gdf_edges.to_file('{}/edges'.format(folder), encoding=encoding)
    log('Saved graph "{}" to disk as shapefiles at "{}" in {:,.2f} seconds'.format(G_save.name, folder, time.time()-start_time))
def save_graphml(G, filename='graph.graphml', folder=None):
    """
    Save graph as GraphML file to disk.
    Parameters
    ----------
    G : graph
    filename : string, the name of the graphml file (including file extension)
    folder : string, the folder to contain the file, if None, use default data folder
    Returns
    -------
    None
    """
    start_time = time.time()
    if folder is None:
        folder = globals.data_folder

    # GraphML can only store string values, so work on a copy whose graph-,
    # node- and edge-level attribute values have all been stringified
    G_save = G.copy()

    def _stringify_values(attr_dict):
        # convert every value in the attribute dict to a string, in place
        for attr_name in attr_dict:
            attr_dict[attr_name] = make_str(attr_dict[attr_name])

    _stringify_values(G_save.graph)
    for _, node_data in G_save.nodes(data=True):
        _stringify_values(node_data)
    for _, _, _, edge_data in G_save.edges(keys=True, data=True):
        _stringify_values(edge_data)

    if not os.path.exists(folder):
        os.makedirs(folder)
    nx.write_graphml(G_save, '{}/{}'.format(folder, filename))
    log('Saved graph "{}" to disk as GraphML at "{}/{}" in {:,.2f} seconds'.format(G_save.name, folder, filename, time.time()-start_time))
def load_graphml(filename, folder=None):
    """
    Load a GraphML file from disk and convert the node/edge attributes to correct data types.
    Parameters
    ----------
    filename : string, the name of the graphml file (including file extension)
    folder : string, the folder containing the file, if None, use default data folder
    Returns
    -------
    G : graph
    """
    start_time = time.time()
    # read the graph from disk
    if folder is None:
        folder = globals.data_folder
    path = '{}/{}'.format(folder, filename)
    G = nx.MultiDiGraph(nx.read_graphml(path, node_type=int))
    # convert graph crs attribute from saved string to correct dict data type
    G.graph['crs'] = ast.literal_eval(G.graph['crs'])
    if 'streets_per_node' in G.graph:
        G.graph['streets_per_node'] = ast.literal_eval(G.graph['streets_per_node'])
    # convert numeric node tags from string to numeric data types
    log('Converting node and edge attribute data types')
    for node, data in G.nodes(data=True):
        data['osmid'] = int(data['osmid'])
        data['x'] = float(data['x'])
        data['y'] = float(data['y'])
    # convert numeric, bool, and list node tags from string to correct data types
    for u, v, key, data in G.edges(keys=True, data=True):
        # first parse oneway to bool and length to float - they should always have only 1 value each.
        # BUG FIX: save_graphml stringifies every attribute, so oneway arrives
        # as the literal 'True' or 'False'; bool() on any non-empty string is
        # True, which previously marked every edge as oneway after a
        # round-trip. Compare against the saved literal instead, keeping
        # bool() for values that are already booleans.
        if isinstance(data['oneway'], str):
            data['oneway'] = data['oneway'] == 'True'
        else:
            data['oneway'] = bool(data['oneway'])
        data['length'] = float(data['length'])
        # these attributes might have a single value, or a list if edge's topology was simplified
        for attr in ['highway', 'name', 'bridge', 'tunnel', 'lanes', 'ref', 'maxspeed', 'service', 'access', 'area', 'landuse', 'width', 'est_width']:
            # if this edge has this attribute, and it starts with '[' and ends with ']', then it's a list to be parsed
            if attr in data and data[attr][0] == '[' and data[attr][-1] == ']':
                # convert the string list to a list type, else leave as single-value string
                data[attr] = ast.literal_eval(data[attr])
        # osmid might have a single value or a list, but if single value, then parse int
        if 'osmid' in data:
            if data['osmid'][0] == '[' and data['osmid'][-1] == ']':
                data['osmid'] = ast.literal_eval(data['osmid'])
            else:
                data['osmid'] = int(data['osmid'])
        # if geometry attribute exists, load the string as well-known text to shapely LineString
        if 'geometry' in data:
            data['geometry'] = wkt.loads(data['geometry'])
    log('Loaded graph with {:,} nodes and {:,} edges in {:,.2f} seconds from "{}"'.format(len(list(G.nodes())),
                                                                                          len(list(G.edges())),
                                                                                          time.time()-start_time,
                                                                                          path))
    return G
def get_undirected(G):
    """
    Convert a directed graph to an undirected graph that maintains parallel edges in opposite directions if geometries differ.
    Parameters
    ----------
    G : graph
    Returns
    -------
    G_undir : Graph
    """
    # set from/to nodes and then make undirected
    # (G.edge[...] is the networkx 1.x edge-attribute API)
    G = G.copy()
    for u, v, key in G.edges(keys=True):
        G.edge[u][v][key]['from'] = u
        G.edge[u][v][key]['to'] = v
    G_undir = G.to_undirected(reciprocal=False)
    # if edges in both directions (u,v) and (v,u) exist in the graph,
    # attributes for the new undirected edge will be a combination of the attributes of the directed edges.
    # if both edges exist in digraph and their edge data is different,
    # only one edge is created with an arbitrary choice of which edge data to use.
    # you need to manually retain edges in both directions between nodes if their geometries are different
    # this is necessary to save shapefiles for weird intersections like the one at 41.8958697,-87.6794924
    # find all edges (u,v) that have a parallel edge going the opposite direction (v,u) with a different osmid
    for u, v, key, data in G.edges(keys=True, data=True):
        try:
            # look at each edge going the opposite direction (from v to u)
            for key2 in G.edge[v][u]:
                # if this edge has geometry and its osmid is different from its reverse's
                if 'geometry' in data and not data['osmid'] == G.edge[v][u][key2]['osmid']:
                    # turn the geometry of each edge into lists of x's and y's
                    geom1 = [list(coords) for coords in data['geometry'].xy]
                    geom2 = [list(coords) for coords in G_undir[u][v][key]['geometry'].xy]
                    # reverse the first edge's list of x's and y's to look for a match in either order
                    geom1_r = [list(reversed(list(coords))) for coords in data['geometry'].xy]
                    # if the edge's geometry doesn't match its reverse's geometry in either order
                    if not (geom1 == geom2 or geom1_r == geom2):
                        # add it as a new edge to the graph to be saved (with key equal to the current largest key plus one)
                        new_key = max(G.edge[u][v]) + 1
                        G_undir.add_edge(u, v, new_key, **data)
        except:
            # NOTE(review): this bare except is presumably meant to skip node
            # pairs with no reverse edge (KeyError on G.edge[v][u]), but it
            # also silently hides every other failure in the body above --
            # consider narrowing it to KeyError after verifying no other
            # exception type is relied upon here.
            pass
    return G_undir
def graph_to_gdfs(G, nodes=True, edges=True, node_geometry=True, fill_edge_geometry=True):
    """
    Convert a graph into node and/or edge GeoDataFrames
    Parameters
    ----------
    G : graph
    nodes : bool, if True, convert graph nodes to a GeoDataFrame and return it
    edges : bool, if True, convert graph edges to a GeoDataFrame and return it
    node_geometry : bool, if True, create a geometry column from node x and y data
    fill_edge_geometry : bool, if True, fill in missing edge geometry fields using origin and destination nodes
    Returns
    -------
    gdf_nodes : GeoDataFrame (optional)
    gdf_edges : GeoDataFrame (optional)
    """
    if not (nodes or edges):
        raise ValueError('You must request nodes or edges, or both.')
    to_return = []
    if nodes:
        start_time = time.time()
        # note: the boolean parameter `nodes` is rebound here to the node
        # data dict; it is not read as a flag again below
        nodes = {node:data for node, data in G.nodes(data=True)}
        gdf_nodes = gpd.GeoDataFrame(nodes).T
        if node_geometry:
            gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1)
        gdf_nodes.crs = G.graph['crs']
        # gdf_name is a plain Python attribute on the frame (not a column);
        # gdfs_to_graph below relies on this exact '<name>_nodes' format
        gdf_nodes.gdf_name = '{}_nodes'.format(G.graph['name'])
        gdf_nodes['osmid'] = gdf_nodes['osmid'].astype(np.int64).map(make_str)
        to_return.append(gdf_nodes)
        log('Created GeoDataFrame "{}" from graph in {:,.2f} seconds'.format(gdf_nodes.gdf_name, time.time()-start_time))
    if edges:
        start_time = time.time()
        # create a list to hold our edges, then loop through each edge in the graph
        # (the boolean parameter `edges` is likewise rebound to a list here)
        edges = []
        for u, v, key, data in G.edges(keys=True, data=True):
            # for each edge, add key and all attributes in data dict to the edge_details
            edge_details = {'u':u, 'v':v, 'key':key}
            for attr_key in data:
                edge_details[attr_key] = data[attr_key]
            # if edge doesn't already have a geometry attribute, create one now if fill_edge_geometry==True
            if not 'geometry' in data:
                if fill_edge_geometry:
                    point_u = Point((G.node[u]['x'], G.node[u]['y']))
                    point_v = Point((G.node[v]['x'], G.node[v]['y']))
                    edge_details['geometry'] = LineString([point_u, point_v])
                else:
                    edge_details['geometry'] = np.nan
            edges.append(edge_details)
        # create a GeoDataFrame from the list of edges and set the CRS
        gdf_edges = gpd.GeoDataFrame(edges)
        gdf_edges.crs = G.graph['crs']
        gdf_edges.gdf_name = '{}_edges'.format(G.graph['name'])
        to_return.append(gdf_edges)
        log('Created GeoDataFrame "{}" from graph in {:,.2f} seconds'.format(gdf_edges.gdf_name, time.time()-start_time))
    # return a 2-tuple when both frames were requested, else the single frame
    if len(to_return) > 1:
        return tuple(to_return)
    else:
        return to_return[0]
def gdfs_to_graph(gdf_nodes, gdf_edges):
    """
    Convert node and edge GeoDataFrames into a graph
    Parameters
    ----------
    gdf_nodes : GeoDataFrame
    gdf_edges : GeoDataFrame
    Returns
    -------
    G : graph
    """
    G = nx.MultiDiGraph()
    G.graph['crs'] = gdf_nodes.crs

    # recover the original graph name by removing the '_nodes' suffix that
    # graph_to_gdfs appends. BUG FIX: str.rstrip('_nodes') strips *characters*
    # from the set {_, n, o, d, e, s} (turning e.g. 'boston_nodes' into
    # 'bost'), so remove the exact suffix instead.
    gdf_name = gdf_nodes.gdf_name
    if gdf_name.endswith('_nodes'):
        gdf_name = gdf_name[:-len('_nodes')]
    G.graph['name'] = gdf_name

    # add the nodes and their attributes to the graph
    G.add_nodes_from(gdf_nodes.index)
    attributes = gdf_nodes.to_dict()
    for attribute_name in gdf_nodes.columns:
        # only add this attribute to nodes which have a non-null value for it
        attribute_values = {k:v for k, v in attributes[attribute_name].items() if pd.notnull(v)}
        nx.set_node_attributes(G, attribute_name, attribute_values)

    # add the edges and attributes that are not u, v, key (as they're added separately) or null
    for _, row in gdf_edges.iterrows():
        attrs = {}
        for label, value in row.iteritems():
            # lists (e.g. simplified-edge attribute lists) are always kept;
            # scalars are kept only when non-null
            if (label not in ['u', 'v', 'key']) and (isinstance(value, list) or pd.notnull(value)):
                attrs[label] = value
        G.add_edge(u=row['u'], v=row['v'], key=row['key'], **attrs)

    return G
def make_shp_filename(place_name):
    """
    Create a filename string in a consistent format from a place name string.
    Parameters
    ----------
    place_name : string, place name to convert into a filename
    Returns
    -------
    filename : string
    """
    # reverse the comma-separated components so the broadest region comes
    # first, e.g. 'Berkeley, California, USA' -> 'usa-california-berkeley'
    pieces = place_name.split(', ')[::-1]
    raw = '-'.join(pieces).lower().replace(' ', '_')
    # drop anything that is not a letter, digit, underscore or hyphen
    return re.sub('[^0-9a-zA-Z_-]+', '', raw)
| gpl-3.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/legend_handler.py | 4 | 22859 | """
This module defines default legend handlers.
It is strongly encouraged to have read the :ref:`legend guide
<plotting-guide-legend>` before this documentation.
Legend handlers are expected to be a callable object with a following
signature. ::
legend_handler(legend, orig_handle, fontsize, handlebox)
Where *legend* is the legend itself, *orig_handle* is the original
plot, *fontsize* is the fontsize in pixles, and *handlebox* is a
OffsetBox instance. Within the call, you should create relevant
artists (using relevant properties from the *legend* and/or
*orig_handle*) and add them into the handlebox. The artists needs to
be scaled according to the fontsize (note that the size is in pixel,
i.e., this is dpi-scaled value).
This module includes definition of several legend handler classes
derived from the base class (HandlerBase) with the following method.
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import numpy as np
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
def update_from_first_child(tgt, src):
    """Copy the properties of *src*'s first child artist onto *tgt*."""
    first_child = src.get_children()[0]
    tgt.update_from(first_child)
class HandlerBase(object):
    """
    A Base class for default legend handlers.
    The derived classes are meant to override *create_artists* method, which
    has a following signature.::
        def create_artists(self, legend, orig_handle,
                           xdescent, ydescent, width, height, fontsize,
                           trans):
    The overridden method needs to create artists of the given
    transform that fits in the given dimension (xdescent, ydescent,
    width, height) that are scaled by fontsize if necessary.
    """
    def __init__(self, xpad=0., ypad=0., update_func=None):
        # pads are in fontsize units; update_func, when given, replaces the
        # default property-copying behavior of _default_update_prop
        self._xpad, self._ypad = xpad, ypad
        self._update_prop_func = update_func
    def _update_prop(self, legend_handle, orig_handle):
        # dispatch to the user-supplied update function when one was given
        if self._update_prop_func is None:
            self._default_update_prop(legend_handle, orig_handle)
        else:
            self._update_prop_func(legend_handle, orig_handle)
    def _default_update_prop(self, legend_handle, orig_handle):
        # by default, copy the original artist's properties onto the proxy
        legend_handle.update_from(orig_handle)
    def update_prop(self, legend_handle, orig_handle, legend):
        # copy properties, attach the proxy to the legend, and clear any
        # clipping inherited from the original artist (the proxy must be
        # fully visible inside the legend box)
        self._update_prop(legend_handle, orig_handle)
        legend._set_artist_props(legend_handle)
        legend_handle.set_clip_box(None)
        legend_handle.set_clip_path(None)
    def adjust_drawing_area(self, legend, orig_handle,
                            xdescent, ydescent, width, height, fontsize,
                            ):
        # shrink the drawing area by the configured pads (fontsize-scaled)
        xdescent = xdescent - self._xpad * fontsize
        ydescent = ydescent - self._ypad * fontsize
        width = width - self._xpad * fontsize
        height = height - self._ypad * fontsize
        return xdescent, ydescent, width, height
    def legend_artist(self, legend, orig_handle,
                      fontsize, handlebox):
        """
        Return the artist that this HandlerBase generates for the given
        original artist/handle.
        Parameters
        ----------
        legend : :class:`matplotlib.legend.Legend` instance
            The legend for which these legend artists are being created.
        orig_handle : :class:`matplotlib.artist.Artist` or similar
            The object for which these legend artists are being created.
        fontsize : float or int
            The fontsize in pixels. The artists being created should
            be scaled according to the given fontsize.
        handlebox : :class:`matplotlib.offsetbox.OffsetBox` instance
            The box which has been created to hold this legend entry's
            artists. Artists created in the `legend_artist` method must
            be added to this handlebox inside this method.
        """
        xdescent, ydescent, width, height = self.adjust_drawing_area(
                                                 legend, orig_handle,
                                                 handlebox.xdescent, handlebox.ydescent,
                                                 handlebox.width, handlebox.height,
                                                 fontsize)
        artists = self.create_artists(legend, orig_handle,
                                      xdescent, ydescent, width, height,
                                      fontsize, handlebox.get_transform())
        # create_artists will return a list of artists.
        for a in artists:
            handlebox.add_artist(a)
        # we only return the first artist
        return artists[0]
    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # subclasses must implement the actual proxy-artist construction
        raise NotImplementedError('Derived must override')
class HandlerNpoints(HandlerBase):
    # Base handler for artists drawn as a configurable number of points
    # spread across the legend entry's width.
    def __init__(self, marker_pad=0.3, numpoints=None, **kw):
        # marker_pad is in fontsize units; numpoints=None defers to the
        # legend's own numpoints setting
        HandlerBase.__init__(self, **kw)
        self._numpoints = numpoints
        self._marker_pad = marker_pad
    def get_numpoints(self, legend):
        if self._numpoints is None:
            return legend.numpoints
        else:
            return self._numpoints
    def get_xdata(self, legend, xdescent, ydescent, width, height, fontsize):
        # Returns (xdata, xdata_marker): x positions for the line and for the
        # markers drawn on it.
        # NOTE(review): when numpoints < 1 neither branch runs and the final
        # return raises UnboundLocalError -- confirm callers never pass 0.
        numpoints = self.get_numpoints(legend)
        if numpoints > 1:
            # we put some pad here to compensate the size of the
            # marker
            xdata = np.linspace(-xdescent + self._marker_pad * fontsize,
                                width - self._marker_pad * fontsize,
                                numpoints)
            xdata_marker = xdata
        elif numpoints == 1:
            # a single marker goes in the middle of a 2-point line
            xdata = np.linspace(-xdescent, width, 2)
            xdata_marker = [0.5 * width - 0.5 * xdescent]
        return xdata, xdata_marker
class HandlerNpointsYoffsets(HandlerNpoints):
    # Handler whose marker y-positions can be overridden with fixed offsets
    # (fractions of the entry height).
    def __init__(self, numpoints=None, yoffsets=None, **kw):
        HandlerNpoints.__init__(self, numpoints=numpoints, **kw)
        self._yoffsets = yoffsets

    def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
        # fall back to the legend's scatter offsets unless explicit ones
        # were supplied at construction time
        offsets = (legend._scatteryoffsets if self._yoffsets is None
                   else np.asarray(self._yoffsets))
        return height * offsets
class HandlerLine2D(HandlerNpoints):
    """
    Handler for Line2D instances.
    """
    def __init__(self, marker_pad=0.3, numpoints=None, **kw):
        HandlerNpoints.__init__(self, marker_pad=marker_pad, numpoints=numpoints, **kw)
    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        # Builds two proxies: a markerless line plus a line-less marker
        # overlay, so line style and marker can be styled independently.
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)
        # a horizontal line vertically centered in the entry box
        ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
        legline = Line2D(xdata, ydata)
        self.update_prop(legline, orig_handle, legend)
        legline.set_drawstyle('default')
        legline.set_marker("")
        legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
        self.update_prop(legline_marker, orig_handle, legend)
        legline_marker.set_linestyle('None')
        if legend.markerscale != 1:
            newsz = legline_marker.get_markersize() * legend.markerscale
            legline_marker.set_markersize(newsz)
        # we don't want to add this to the return list because
        # the texts and handles are assumed to be in one-to-one
        # correspondence.
        legline._legmarker = legline_marker
        legline.set_transform(trans)
        legline_marker.set_transform(trans)
        return [legline, legline_marker]
class HandlerPatch(HandlerBase):
    """
    Handler for Patch instances.
    """
    def __init__(self, patch_func=None, **kw):
        """
        The HandlerPatch class optionally takes a function ``patch_func``
        who's responsibility is to create the legend key artist. The
        ``patch_func`` should have the signature::
            def patch_func(legend=legend, orig_handle=orig_handle,
                           xdescent=xdescent, ydescent=ydescent,
                           width=width, height=height, fontsize=fontsize)
        Subsequently the created artist will have its ``update_prop`` method
        called and the appropriate transform will be applied.
        """
        HandlerBase.__init__(self, **kw)
        self._patch_func = patch_func
    def _create_patch(self, legend, orig_handle,
                      xdescent, ydescent, width, height, fontsize):
        # default proxy is a rectangle filling the whole entry box; a custom
        # patch_func (all-keyword call signature, see __init__) overrides it
        if self._patch_func is None:
            p = Rectangle(xy=(-xdescent, -ydescent),
                          width=width, height=height)
        else:
            p = self._patch_func(legend=legend, orig_handle=orig_handle,
                                 xdescent=xdescent, ydescent=ydescent,
                                 width=width, height=height, fontsize=fontsize)
        return p
    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        p = self._create_patch(legend, orig_handle,
                               xdescent, ydescent, width, height, fontsize)
        self.update_prop(p, orig_handle, legend)
        p.set_transform(trans)
        return [p]
class HandlerLineCollection(HandlerLine2D):
    """
    Handler for LineCollection instances.
    """
    def get_numpoints(self, legend):
        # LineCollections follow the legend's scatterpoints setting by default
        return (legend.scatterpoints if self._numpoints is None
                else self._numpoints)

    def _default_update_prop(self, legend_handle, orig_handle):
        # copy the style of the collection's first line onto the proxy Line2D
        legend_handle.set_color(orig_handle.get_colors()[0])
        legend_handle.set_linestyle(orig_handle._us_linestyles[0])
        legend_handle.set_linewidth(orig_handle.get_linewidths()[0])

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        xdata, _ = self.get_xdata(legend, xdescent, ydescent,
                                  width, height, fontsize)
        # a single horizontal proxy line, vertically centered in the entry box
        ydata = np.full(xdata.shape, (height - ydescent) / 2.)
        proxy = Line2D(xdata, ydata)
        self.update_prop(proxy, orig_handle, legend)
        proxy.set_transform(trans)
        return [proxy]
class HandlerRegularPolyCollection(HandlerNpointsYoffsets):
    """
    Handler for RegularPolyCollections.
    """
    def __init__(self, yoffsets=None, sizes=None, **kw):
        # sizes=None means: derive the proxy marker sizes from the original
        # collection in get_sizes
        HandlerNpointsYoffsets.__init__(self, yoffsets=yoffsets, **kw)
        self._sizes = sizes
    def get_numpoints(self, legend):
        if self._numpoints is None:
            return legend.scatterpoints
        else:
            return self._numpoints
    def get_sizes(self, legend, orig_handle,
                  xdescent, ydescent, width, height, fontsize):
        # Returns one size per legend marker. When not explicitly configured,
        # the sizes span the original collection's min..max (markerscale
        # applies squared because sizes are areas).
        if self._sizes is None:
            handle_sizes = orig_handle.get_sizes()
            if not len(handle_sizes):
                handle_sizes = [1]
            size_max = max(handle_sizes) * legend.markerscale ** 2
            size_min = min(handle_sizes) * legend.markerscale ** 2
            numpoints = self.get_numpoints(legend)
            if numpoints < 4:
                # up to 3 points: midpoint, max, min (truncated to numpoints)
                sizes = [.5 * (size_max + size_min), size_max,
                         size_min][:numpoints]
            else:
                # 4+ points: interpolate linearly between min and max
                rng = (size_max - size_min)
                sizes = rng * np.linspace(0, 1, numpoints) + size_min
        else:
            sizes = self._sizes
        return sizes
    def update_prop(self, legend_handle, orig_handle, legend):
        # unlike HandlerBase.update_prop, only the figure is attached here
        # (collections are not run through legend._set_artist_props)
        self._update_prop(legend_handle, orig_handle)
        legend_handle.set_figure(legend.figure)
        #legend._set_artist_props(legend_handle)
        legend_handle.set_clip_box(None)
        legend_handle.set_clip_path(None)
    def create_collection(self, orig_handle, sizes, offsets, transOffset):
        # build a proxy collection of the same concrete type as the original
        p = type(orig_handle)(orig_handle.get_numsides(),
                              rotation=orig_handle.get_rotation(),
                              sizes=sizes,
                              offsets=offsets,
                              transOffset=transOffset,
                              )
        return p
    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)
        ydata = self.get_ydata(legend, xdescent, ydescent,
                               width, height, fontsize)
        sizes = self.get_sizes(legend, orig_handle, xdescent, ydescent,
                               width, height, fontsize)
        p = self.create_collection(orig_handle, sizes,
                                   offsets=list(zip(xdata_marker, ydata)),
                                   transOffset=trans)
        self.update_prop(p, orig_handle, legend)
        p._transOffset = trans
        return [p]
class HandlerPathCollection(HandlerRegularPolyCollection):
    """
    Handler for PathCollections, which are used by scatter
    """
    def create_collection(self, orig_handle, sizes, offsets, transOffset):
        # build a proxy collection of the same concrete type, reusing the
        # original handle's first path as the legend marker shape
        collection_cls = type(orig_handle)
        marker_paths = [orig_handle.get_paths()[0]]
        return collection_cls(marker_paths,
                              sizes=sizes,
                              offsets=offsets,
                              transOffset=transOffset)
class HandlerCircleCollection(HandlerRegularPolyCollection):
    """
    Handler for CircleCollections
    """
    def create_collection(self, orig_handle, sizes, offsets, transOffset):
        # CircleCollection takes the marker sizes directly as its first
        # positional argument
        collection_cls = type(orig_handle)
        return collection_cls(sizes,
                              offsets=offsets,
                              transOffset=transOffset)
class HandlerErrorbar(HandlerLine2D):
    """
    Handler for Errorbars.

    *orig_handle* is expected to unpack into ``(plotlines, caplines,
    barlinecols)`` and to expose ``has_xerr`` / ``has_yerr`` flags
    (as the container returned by ``errorbar`` does — TODO confirm).
    """
    def __init__(self, xerr_size=0.5, yerr_size=None,
                 marker_pad=0.3, numpoints=None, **kw):
        # error-bar half-lengths, as a fraction of the legend fontsize
        self._xerr_size = xerr_size
        self._yerr_size = yerr_size  # None means "same as xerr_size"
        HandlerLine2D.__init__(self, marker_pad=marker_pad, numpoints=numpoints,
                               **kw)

    def get_err_size(self, legend, xdescent, ydescent, width, height, fontsize):
        """Return the (x, y) error-bar half-lengths scaled by *fontsize*."""
        xerr_size = self._xerr_size * fontsize
        if self._yerr_size is None:
            yerr_size = xerr_size
        else:
            yerr_size = self._yerr_size * fontsize
        return xerr_size, yerr_size

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        """Build the legend artists: connecting line, markers, error
        bars and caps, each styled after the original errorbar artists."""
        plotlines, caplines, barlinecols = orig_handle
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)
        # center the line vertically in the legend box
        ydata = ((height - ydescent) / 2.) * np.ones(xdata.shape, float)
        legline = Line2D(xdata, ydata)
        xdata_marker = np.asarray(xdata_marker)
        ydata_marker = np.asarray(ydata[:len(xdata_marker)])
        xerr_size, yerr_size = self.get_err_size(legend, xdescent, ydescent,
                                                 width, height, fontsize)
        legline_marker = Line2D(xdata_marker, ydata_marker)
        # when plotlines are None (only errorbars are drawn), we just
        # make legline invisible.
        if plotlines is None:
            legline.set_visible(False)
            legline_marker.set_visible(False)
        else:
            self.update_prop(legline, plotlines, legend)
            # split into two artists: the line without markers, and the
            # markers without a line
            legline.set_drawstyle('default')
            legline.set_marker('None')
            self.update_prop(legline_marker, plotlines, legend)
            legline_marker.set_linestyle('None')
            if legend.markerscale != 1:
                newsz = legline_marker.get_markersize() * legend.markerscale
                legline_marker.set_markersize(newsz)
        handle_barlinecols = []
        handle_caplines = []
        if orig_handle.has_xerr:
            # horizontal bar through each marker
            verts = [((x - xerr_size, y), (x + xerr_size, y))
                     for x, y in zip(xdata_marker, ydata_marker)]
            coll = mcoll.LineCollection(verts)
            self.update_prop(coll, barlinecols[0], legend)
            handle_barlinecols.append(coll)
            if caplines:
                capline_left = Line2D(xdata_marker - xerr_size, ydata_marker)
                capline_right = Line2D(xdata_marker + xerr_size, ydata_marker)
                self.update_prop(capline_left, caplines[0], legend)
                self.update_prop(capline_right, caplines[0], legend)
                capline_left.set_marker("|")
                capline_right.set_marker("|")
                handle_caplines.append(capline_left)
                handle_caplines.append(capline_right)
        if orig_handle.has_yerr:
            # vertical bar through each marker
            verts = [((x, y - yerr_size), (x, y + yerr_size))
                     for x, y in zip(xdata_marker, ydata_marker)]
            coll = mcoll.LineCollection(verts)
            self.update_prop(coll, barlinecols[0], legend)
            handle_barlinecols.append(coll)
            if caplines:
                capline_left = Line2D(xdata_marker, ydata_marker - yerr_size)
                capline_right = Line2D(xdata_marker, ydata_marker + yerr_size)
                self.update_prop(capline_left, caplines[0], legend)
                self.update_prop(capline_right, caplines[0], legend)
                capline_left.set_marker("_")
                capline_right.set_marker("_")
                handle_caplines.append(capline_left)
                handle_caplines.append(capline_right)
        # draw order: error bars, then caps, then line, then markers on top
        artists = []
        artists.extend(handle_barlinecols)
        artists.extend(handle_caplines)
        artists.append(legline)
        artists.append(legline_marker)
        for artist in artists:
            artist.set_transform(trans)
        return artists
class HandlerStem(HandlerNpointsYoffsets):
    """
    Handler for stem plots.

    *orig_handle* is expected to unpack into ``(markerline, stemlines,
    baseline)``, as returned by ``stem``.
    """
    def __init__(self, marker_pad=0.3, numpoints=None,
                 bottom=None, yoffsets=None, **kw):
        # bottom: y-value the legend stems grow from (defaults to 0.
        # at draw time, see create_artists)
        HandlerNpointsYoffsets.__init__(self, marker_pad=marker_pad,
                                        numpoints=numpoints,
                                        yoffsets=yoffsets,
                                        **kw)
        self._bottom = bottom

    def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):
        """Return the y positions of the stem tops inside the legend box."""
        if self._yoffsets is None:
            # reuse the legend's scatter y-offsets, rescaled into [0, height]
            ydata = height * (0.5 * legend._scatteryoffsets + 0.5)
        else:
            ydata = height * np.asarray(self._yoffsets)
        return ydata

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        """Build the marker line, one stem per legend point, and the
        baseline, each styled after the corresponding original artist."""
        markerline, stemlines, baseline = orig_handle
        xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
                                             width, height, fontsize)
        ydata = self.get_ydata(legend, xdescent, ydescent,
                               width, height, fontsize)
        if self._bottom is None:
            bottom = 0.
        else:
            bottom = self._bottom
        leg_markerline = Line2D(xdata_marker, ydata[:len(xdata_marker)])
        self.update_prop(leg_markerline, markerline, legend)
        # one vertical stem per marker position
        leg_stemlines = []
        for thisx, thisy in zip(xdata_marker, ydata):
            l = Line2D([thisx, thisx], [bottom, thisy])
            leg_stemlines.append(l)
        for lm, m in zip(leg_stemlines, stemlines):
            self.update_prop(lm, m, legend)
        # baseline spans the full width of the legend entry
        leg_baseline = Line2D([np.amin(xdata), np.amax(xdata)],
                              [bottom, bottom])
        self.update_prop(leg_baseline, baseline, legend)
        artists = [leg_markerline]
        artists.extend(leg_stemlines)
        artists.append(leg_baseline)
        for artist in artists:
            artist.set_transform(trans)
        return artists
class HandlerTuple(HandlerBase):
    """
    Handler for a tuple of handles: draws the artists of every member
    handle on top of each other in the same legend slot.
    """
    def __init__(self, **kwargs):
        HandlerBase.__init__(self, **kwargs)

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize,
                       trans):
        """Delegate to the registered handler of each member handle and
        pool the artists they produce."""
        handler_map = legend.get_legend_handler_map()
        artists = []
        for member in orig_handle:
            member_handler = legend.get_legend_handler(handler_map, member)
            artists.extend(member_handler.create_artists(
                legend, member, xdescent, ydescent,
                width, height, fontsize, trans))
        return artists
class HandlerPolyCollection(HandlerBase):
    """
    Handler for PolyCollection used in fill_between and stackplot.
    """
    def _update_prop(self, legend_handle, orig_handle):
        """Copy display properties from the collection *orig_handle*
        onto the single patch *legend_handle*."""
        def first_color(colors):
            # collections carry per-element color arrays; the legend patch
            # can only show one color, so take the first entry
            if colors is None:
                return None
            colors = mcolors.to_rgba_array(colors)
            if len(colors):
                return colors[0]
            else:
                return "none"
        def get_first(prop_array):
            # same idea for scalar-per-element properties
            if len(prop_array):
                return prop_array[0]
            else:
                return None
        # prefer the colors saved by the plotting code (if present) over
        # the collection's current, possibly colormapped, colors
        edgecolor = getattr(orig_handle, '_original_edgecolor',
                            orig_handle.get_edgecolor())
        legend_handle.set_edgecolor(first_color(edgecolor))
        facecolor = getattr(orig_handle, '_original_facecolor',
                            orig_handle.get_facecolor())
        legend_handle.set_facecolor(first_color(facecolor))
        legend_handle.set_fill(orig_handle.get_fill())
        legend_handle.set_hatch(orig_handle.get_hatch())
        legend_handle.set_linewidth(get_first(orig_handle.get_linewidths()))
        legend_handle.set_linestyle(get_first(orig_handle.get_linestyles()))
        legend_handle.set_transform(get_first(orig_handle.get_transforms()))
        legend_handle.set_figure(orig_handle.get_figure())
        legend_handle.set_alpha(orig_handle.get_alpha())

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        """Represent the filled collection as a single legend-sized
        rectangle carrying the collection's first colors/styles."""
        p = Rectangle(xy=(-xdescent, -ydescent),
                      width=width, height=height)
        self.update_prop(p, orig_handle, legend)
        p.set_transform(trans)
        return [p]
| bsd-2-clause |
samuel1208/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
# (estimator class, constructor kwargs) pairs exercised by every test below:
# both label-propagation variants, each with an RBF and a 2-NN kernel.
ESTIMATORS = [
    (label_propagation.LabelPropagation, {'kernel': 'rbf'}),
    (label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
    (label_propagation.LabelSpreading, {'kernel': 'rbf'}),
    (label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
    """The unlabeled sample (label -1) must be transduced to class 1."""
    X = [[1., 0.], [0., 2.], [1., 3.]]
    y = [0, 1, -1]
    for estimator_cls, params in ESTIMATORS:
        fitted = estimator_cls(**params).fit(X, y)
        nose.tools.assert_equal(fitted.transduction_[2], 1)
def test_distribution():
    """The unlabeled point must end up with a ~50/50 label distribution.

    Bug fix: the original placed an ``assert_array_almost_equal`` call on
    ``predict_proba`` *after* the ``continue`` statement inside the
    ``kernel == 'knn'`` branch, making it unreachable dead code; the dead
    statement is removed and the control flow flattened, with behavior
    otherwise unchanged (knn kernels are still skipped as unstable).
    """
    samples = [[1., 0.], [0., 1.], [1., 1.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        if parameters['kernel'] == 'knn':
            continue  # unstable test; changes in k-NN ordering break it
        assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
                                  np.array([.5, .5]), 2)
def test_predict():
    """Hard prediction for a new point matches the expected class (1)."""
    X = [[1., 0.], [0., 2.], [1., 3.]]
    y = [0, 1, -1]
    for estimator_cls, params in ESTIMATORS:
        fitted = estimator_cls(**params).fit(X, y)
        assert_array_equal(fitted.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
    """A query point at [1, 1] gets an (almost) uniform class distribution."""
    X = [[1., 0.], [0., 1.], [1., 2.5]]
    y = [0, 1, -1]
    for estimator_cls, params in ESTIMATORS:
        fitted = estimator_cls(**params).fit(X, y)
        assert_array_almost_equal(fitted.predict_proba([[1., 1.]]),
                                  np.array([[0.5, 0.5]]))
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/reshape/merge/test_merge_ordered.py | 2 | 2966 | import pandas as pd
from pandas import DataFrame, merge_ordered
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal
from numpy import nan
class TestMergeOrdered(object):
    """Tests for :func:`pandas.merge_ordered` (ordered merge with
    optional group-wise forward filling)."""

    def setup_method(self, method):
        # Two frames sharing the 'key' column with partially
        # overlapping, sorted keys.
        self.left = DataFrame({'key': ['a', 'c', 'e'],
                               'lvalue': [1, 2., 3]})
        self.right = DataFrame({'key': ['b', 'c', 'd', 'f'],
                                'rvalue': [1, 2, 3., 4]})

    def test_basic(self):
        # outer-style ordered merge: union of keys, NaN where a side
        # has no matching row
        result = merge_ordered(self.left, self.right, on='key')
        expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
                              'lvalue': [1, nan, 2, nan, 3, nan],
                              'rvalue': [nan, 1, 2, 3, nan, 4]})
        assert_frame_equal(result, expected)

    def test_ffill(self):
        # fill_method='ffill' carries the last observed value forward
        # instead of leaving NaN gaps
        result = merge_ordered(
            self.left, self.right, on='key', fill_method='ffill')
        expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'],
                              'lvalue': [1., 1, 2, 2, 3, 3.],
                              'rvalue': [nan, 1, 2, 3, 3, 4]})
        assert_frame_equal(result, expected)

    def test_multigroup(self):
        # left_by repeats the ordered merge within each group label
        left = pd.concat([self.left, self.left], ignore_index=True)
        left['group'] = ['a'] * 3 + ['b'] * 3
        result = merge_ordered(left, self.right, on='key', left_by='group',
                               fill_method='ffill')
        expected = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'f'] * 2,
                              'lvalue': [1., 1, 2, 2, 3, 3.] * 2,
                              'rvalue': [nan, 1, 2, 3, 3, 4] * 2})
        expected['group'] = ['a'] * 6 + ['b'] * 6
        assert_frame_equal(result, expected.loc[:, result.columns])
        # swapping sides with right_by must give the same frame
        # (modulo column order)
        result2 = merge_ordered(self.right, left, on='key', right_by='group',
                                fill_method='ffill')
        assert_frame_equal(result, result2.loc[:, result.columns])
        # without fill_method the group column must still be fully populated
        result = merge_ordered(left, self.right, on='key', left_by='group')
        assert result['group'].notna().all()

    def test_merge_type(self):
        # merge must preserve the subclass of the calling frame via
        # its _constructor property
        class NotADataFrame(DataFrame):
            @property
            def _constructor(self):
                return NotADataFrame
        nad = NotADataFrame(self.left)
        result = nad.merge(self.right, on='key')
        assert isinstance(result, NotADataFrame)

    def test_empty_sequence_concat(self):
        # GH 9157
        empty_pat = "[Nn]o objects"
        none_pat = "objects.*None"
        test_cases = [
            ((), empty_pat),
            ([], empty_pat),
            ({}, empty_pat),
            ([None], none_pat),
            ([None, None], none_pat)
        ]
        # concatenating nothing (or only Nones) must raise with a
        # message matching the pattern
        for df_seq, pattern in test_cases:
            tm.assert_raises_regex(ValueError, pattern, pd.concat, df_seq)
        # a sequence containing at least one real frame must not raise
        pd.concat([pd.DataFrame()])
        pd.concat([None, pd.DataFrame()])
        pd.concat([pd.DataFrame(), None])
| bsd-3-clause |
dsockwell/trading-with-python | lib/backtest.py | 74 | 7381 | #-------------------------------------------------------------------------------
# Name: backtest
# Purpose: perform routine backtesting tasks.
# This module should be useable as a stand-alone library outide of the TWP package.
#
# Author: Jev Kuznetsov
#
# Created: 03/07/2014
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
def tradeBracket(price, entryBar, upper=None, lower=None, timeout=None):
    '''
    trade a bracket on price series, return exit price and exit bar #

    Input
    ------
    price    : numpy array of price values
    entryBar : entry bar number, *determines entry price*
    upper    : high stop level (None disables)
    lower    : low stop level (None disables)
    timeout  : max number of periods to hold (0 or None disables,
               matching the historical truthiness check)

    Returns exit price and number of bars held
    '''
    assert isinstance(price, np.ndarray), 'price must be a numpy array'

    # create list of candidate exit indices, relative to the entry bar;
    # always include the time-based exit (timeout or end of series)
    if timeout:  # set trade length to timeout or series length
        exits = [min(timeout, len(price) - entryBar - 1)]
    else:
        exits = [len(price) - entryBar - 1]

    p = price[entryBar:entryBar + exits[0] + 1]  # subseries of price

    # extend exits list with conditional exits.
    # BUG FIX: use 'is not None' instead of truthiness so a bracket level
    # of 0.0 (legitimate for series that can trade at/below zero) is not
    # silently ignored.
    if upper is not None:
        assert upper > p[0], 'Upper bracket must be higher than entry price '
        idx = np.where(p > upper)[0]  # bar offsets where price breaches
        # BUG FIX: idx.any() tested the *values* of the index array, so a
        # breach recorded only at offset 0 was dropped; test emptiness.
        if idx.size:
            exits.append(idx[0])  # append first occurrence

    # same for lower bracket
    if lower is not None:
        assert lower < p[0], 'Lower bracket must be lower than entry price '
        idx = np.where(p < lower)[0]
        if idx.size:
            exits.append(idx[0])

    exitBar = min(exits)  # choose the earliest exit
    return p[exitBar], exitBar
class Backtest(object):
    """
    Backtest class, simple vectorized one. Works with pandas objects.

    Builds a DataFrame (``self.data``) with columns price / shares /
    value / cash / pnl, indexed like the input price series.
    """
    def __init__(self, price, signal, signalType='capital', initialCash=0, roundShares=True):
        """
        Arguments:

        *price*  Series with instrument price.
        *signal* Series with capital to invest (long+,short-) or number of shares.
        *signalType* capital to bet or number of shares 'capital' mode is default.
        *initialCash* starting cash.
        *roundShares* round off number of shares to integers
        """
        # TODO: add auto rebalancing
        # check for correct input
        assert signalType in ['capital', 'shares'], "Wrong signal type provided, must be 'capital' or 'shares'"
        # save internal settings to a dict
        self.settings = {'signalType': signalType}
        # first thing to do is to clean up the signal, removing nans and
        # duplicate entries or exits
        self.signal = signal.ffill().fillna(0)
        # now find dates with a trade
        tradeIdx = self.signal.diff().fillna(0) != 0  # days with trades are set to True
        if signalType == 'shares':
            self.trades = self.signal[tradeIdx]  # selected rows where tradeDir changes value. trades are in Shares
        elif signalType == 'capital':
            # convert capital to a share count at the trade-day price
            self.trades = (self.signal[tradeIdx] / price[tradeIdx])
            if roundShares:
                self.trades = self.trades.round()
        # now create internal data structure
        self.data = pd.DataFrame(index=price.index, columns=['price', 'shares', 'value', 'cash', 'pnl'])
        self.data['price'] = price
        # hold the position between trade days
        self.data['shares'] = self.trades.reindex(self.data.index).ffill().fillna(0)
        self.data['value'] = self.data['shares'] * self.data['price']
        delta = self.data['shares'].diff()  # shares bought sold
        # buying shares consumes cash, selling releases it
        self.data['cash'] = (-delta * self.data['price']).fillna(0).cumsum() + initialCash
        self.data['pnl'] = self.data['cash'] + self.data['value'] - initialCash

    @property
    def sharpe(self):
        ''' return annualized sharpe ratio of the pnl '''
        pnl = (self.data['pnl'].diff()).shift(-1)[self.data['shares'] != 0]  # use only days with position.
        return sharpe(pnl)  # need the diff here as sharpe works on daily returns.

    @property
    def pnl(self):
        '''easy access to pnl data column '''
        return self.data['pnl']

    def plotTrades(self):
        """
        visualise trades on the price chart
            long entry : green triangle up
            short entry : red triangle down
            exit : black circle
        """
        l = ['price']
        p = self.data['price']
        p.plot(style='x-')
        # ---plot markers
        # this works, but I rather prefer colored markers for each day of
        # position rather than entry-exit signals
        #         indices = {'g^': self.trades[self.trades > 0].index ,
        #                    'ko':self.trades[self.trades == 0].index,
        #                    'rv':self.trades[self.trades < 0].index}
        #
        #
        #         for style, idx in indices.iteritems():
        #             if len(idx) > 0:
        #                 p[idx].plot(style=style)

        # --- plot trades
        # colored line for long positions (the day after a flip is
        # included via shift(1) so entry days are marked too)
        idx = (self.data['shares'] > 0) | (self.data['shares'] > 0).shift(1)
        if idx.any():
            p[idx].plot(style='go')
            l.append('long')
        # colored line for short positions
        idx = (self.data['shares'] < 0) | (self.data['shares'] < 0).shift(1)
        if idx.any():
            p[idx].plot(style='ro')
            l.append('short')
        plt.xlim([p.index[0], p.index[-1]])  # show full axis
        plt.legend(l, loc='best')
        plt.title('trades')
class ProgressBar:
    """Simple in-place textual progress bar.

    NOTE(review): uses the Python 2 ``print`` statement in ``animate``;
    this class is not Python 3 compatible as written.
    """
    def __init__(self, iterations):
        # iterations: total number of steps the bar represents
        self.iterations = iterations
        self.prog_bar = '[]'     # rendered bar string, rebuilt on update
        self.fill_char = '*'
        self.width = 50          # total bar width in characters
        self.__update_amount(0)

    def animate(self, iteration):
        """Redraw the bar for 0-based *iteration* on the current line."""
        # '\r' returns the cursor to line start so the bar redraws in place
        print '\r',self,
        sys.stdout.flush()
        self.update_iteration(iteration + 1)

    def update_iteration(self, elapsed_iter):
        # rebuild the bar, then append the "x of y" counter
        self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
        self.prog_bar += '  %d of %s complete' % (elapsed_iter, self.iterations)

    def __update_amount(self, new_amount):
        """Render the bar for *new_amount* percent, centering the label."""
        percent_done = int(round((new_amount / 100.0) * 100.0))
        all_full = self.width - 2  # interior width, minus the brackets
        num_hashes = int(round((percent_done / 100.0) * all_full))
        self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
        # overlay the percentage roughly in the middle of the bar
        pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
        pct_string = '%d%%' % percent_done
        self.prog_bar = self.prog_bar[0:pct_place] + \
            (pct_string + self.prog_bar[pct_place + len(pct_string):])

    def __str__(self):
        return str(self.prog_bar)
def sharpe(pnl):
    """Annualized Sharpe ratio of a daily pnl series: mean over standard
    deviation, scaled by sqrt(250) trading days per year."""
    daily_ratio = pnl.mean() / pnl.std()
    return daily_ratio * np.sqrt(250)
| bsd-3-clause |
fulmicoton/pylearn2 | pylearn2/cross_validation/dataset_iterators.py | 29 | 19389 | """
Cross-validation dataset iterators.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
import warnings
try:
from sklearn.cross_validation import (KFold, StratifiedKFold, ShuffleSplit,
StratifiedShuffleSplit)
except ImportError:
warnings.warn("Could not import from sklearn.")
from pylearn2.compat import OrderedDict
from pylearn2.cross_validation.blocks import StackedBlocksCV
from pylearn2.cross_validation.subset_iterators import (
ValidationKFold, StratifiedValidationKFold, ValidationShuffleSplit,
StratifiedValidationShuffleSplit)
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.datasets.transformer_dataset import TransformerDataset
class DatasetCV(object):
    """
    Construct a new DenseDesignMatrix for each subset.

    Parameters
    ----------
    dataset : object
        Full dataset for use in cross validation.
    subset_iterator : iterable
        Iterable that returns (train, test) or (train, valid, test) indices
        for partitioning the dataset during cross-validation.
    preprocessor : Preprocessor or None
        Preprocessor to apply to child datasets.
    fit_preprocessor : bool
        Whether preprocessor can fit parameters when applied to training
        data.
    which_set : str, list or None
        If None, return all subset datasets. If one or more of 'train',
        'valid', or 'test', return only the dataset(s) corresponding to the
        given subset(s).
    return_dict : bool
        Whether to return subset datasets as a dictionary. If True,
        returns a dict with keys 'train', 'valid', and/or 'test' (if
        subset_iterator returns two subsets per partition, 'train' and
        'test' are used, and if subset_iterator returns three subsets per
        partition, 'train', 'valid', and 'test' are used). If False,
        returns a list of datasets matching the subset order given by
        subset_iterator.
    """
    def __init__(self, dataset, subset_iterator, preprocessor=None,
                 fit_preprocessor=False, which_set=None, return_dict=True):
        self.dataset = dataset
        self.subset_iterator = list(subset_iterator)  # allow generator reuse
        # pull all the data out in one batch so subsets can be sliced
        # with plain index arrays below
        dataset_iterator = dataset.iterator(mode='sequential', num_batches=1,
                                            data_specs=dataset.data_specs,
                                            return_tuple=True)
        self._data = dataset_iterator.next()  # NOTE: Python 2 iterator protocol
        self.preprocessor = preprocessor
        self.fit_preprocessor = fit_preprocessor
        self.which_set = which_set
        if which_set is not None:
            # accept a bare string or a list of subset labels
            which_set = np.atleast_1d(which_set)
            assert len(which_set)
            for label in which_set:
                if label not in ['train', 'valid', 'test']:
                    raise ValueError("Unrecognized subset '{}'".format(label))
            self.which_set = which_set
        self.return_dict = return_dict

    def get_data_subsets(self):
        """
        Partition the dataset according to cross-validation subsets and
        return the raw data in each subset.
        """
        for subsets in self.subset_iterator:
            labels = None
            if len(subsets) == 3:
                labels = ['train', 'valid', 'test']
            elif len(subsets) == 2:
                labels = ['train', 'test']
            # data_subsets is an OrderedDict to maintain label order
            data_subsets = OrderedDict()
            for i, subset in enumerate(subsets):
                subset_data = tuple(data[subset] for data in self._data)
                # self._data is either (X,) or (X, y)
                if len(subset_data) == 2:
                    X, y = subset_data
                else:
                    X, = subset_data
                    y = None
                data_subsets[labels[i]] = (X, y)
            yield data_subsets

    def __iter__(self):
        """
        Create a DenseDesignMatrix for each dataset subset and apply any
        preprocessing to the child datasets.
        """
        for data_subsets in self.get_data_subsets():
            datasets = {}
            for label, data in data_subsets.items():
                X, y = data
                datasets[label] = DenseDesignMatrix(X=X, y=y)
            # preprocessing: optionally fit on the training subset, then
            # apply the frozen preprocessor to the remaining subsets
            if self.preprocessor is not None:
                self.preprocessor.apply(datasets['train'],
                                        can_fit=self.fit_preprocessor)
                for label, dataset in datasets.items():
                    if label == 'train':
                        continue
                    self.preprocessor.apply(dataset, can_fit=False)
            # which_set: drop any subsets the caller did not request
            if self.which_set is not None:
                for label, dataset in list(datasets.items()):
                    if label not in self.which_set:
                        del datasets[label]
                        del data_subsets[label]
                if not len(datasets):
                    raise ValueError("No matching dataset(s) for " +
                                     "{}".format(self.which_set))
            if not self.return_dict:
                # data_subsets is an OrderedDict to maintain label order
                datasets = list(datasets[label]
                                for label in data_subsets.keys())
                # a single remaining dataset is unwrapped from the list
                if len(datasets) == 1:
                    datasets, = datasets
            yield datasets
class StratifiedDatasetCV(DatasetCV):
    """
    Subclass of DatasetCV for stratified experiments, where
    the relative class proportions of the full dataset are maintained in
    each partition.

    Parameters
    ----------
    dataset : object
        Dataset to use in cross validation.
    subset_iterator : iterable
        Iterable that returns train/test or train/valid/test splits for
        partitioning the dataset during cross-validation.
    preprocessor : Preprocessor or None
        Preprocessor to apply to child datasets.
    fit_preprocessor : bool
        Whether preprocessor can fit parameters when applied to training
        data.
    which_set : str, list or None
        If None, return all subset datasets. If one or more of 'train',
        'valid', or 'test', return only the dataset(s) corresponding to the
        given subset(s).
    return_dict : bool
        Whether to return subset datasets as a dictionary. If True,
        returns a dict with keys 'train', 'valid', and/or 'test' (if
        subset_iterator returns two subsets per partition, 'train' and
        'test' are used, and if subset_iterator returns three subsets per
        partition, 'train', 'valid', and 'test' are used). If False,
        returns a list of datasets matching the subset order given by
        subset_iterator.
    """
    @staticmethod
    def get_y(dataset):
        """
        Stratified cross-validation requires label information for
        examples. This function gets target values for a dataset,
        converting from one-hot encoding to a 1D array as needed.

        Parameters
        ----------
        dataset : object
            Dataset containing target values for examples.
        """
        y = np.asarray(dataset.y)
        if y.ndim > 1:
            # 2-D targets must be a binary (one-hot) indicator matrix;
            # anything else cannot be safely collapsed to class indices
            assert np.array_equal(np.unique(y), [0, 1])
            y = np.argmax(y, axis=1)
        return y
class TransformerDatasetCV(object):
    """
    Cross-validation with dataset transformations: wraps each dataset
    subset produced by a cross-validation iterator in a
    TransformerDataset driven by one or more pretrained models.

    Parameters
    ----------
    dataset_iterator : DatasetCV
        Cross-validation dataset iterator providing train/test or
        train/valid/test datasets.
    transformers : Model or iterable
        Transformer model(s) to use for transforming datasets.
    """
    def __init__(self, dataset_iterator, transformers):
        self.dataset_iterator = dataset_iterator
        self.transformers = transformers

    def __iter__(self):
        """Yield the transformed dataset container for each fold."""
        for fold, datasets in enumerate(self.dataset_iterator):
            # resolve the transformer to apply for this fold
            if isinstance(self.transformers, list):
                transformer = self.transformers[fold]
            elif isinstance(self.transformers, StackedBlocksCV):
                transformer = self.transformers.select_fold(fold)
            else:
                transformer = self.transformers
            # wrap each subset in place, preserving the container type
            if isinstance(datasets, list):
                for position in range(len(datasets)):
                    datasets[position] = TransformerDataset(
                        datasets[position], transformer)
            else:
                for key in list(datasets.keys()):
                    datasets[key] = TransformerDataset(datasets[key],
                                                       transformer)
            yield datasets
class DatasetKFold(DatasetCV):
    """
    K-fold cross-validation over a dataset.

    Parameters
    ----------
    dataset : object
        Dataset to partition.
    n_folds : int
        Number of cross-validation folds.
    shuffle : bool
        Whether to shuffle the dataset before partitioning.
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        n_examples = dataset.X.shape[0]
        splitter = KFold(n_examples, n_folds=n_folds, shuffle=shuffle,
                         random_state=random_state)
        super(DatasetKFold, self).__init__(dataset, splitter, **kwargs)
class StratifiedDatasetKFold(StratifiedDatasetCV):
    """
    Stratified K-fold cross-validation over a dataset (class proportions
    preserved in each fold).

    Parameters
    ----------
    dataset : object
        Dataset to partition.
    n_folds : int
        Number of cross-validation folds.
    shuffle : bool
        Whether to shuffle the dataset before partitioning.
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        y = self.get_y(dataset)
        try:
            splitter = StratifiedKFold(y, n_folds=n_folds, shuffle=shuffle,
                                       random_state=random_state)
        except TypeError:
            # older sklearn versions accept neither argument
            assert not shuffle and not random_state, (
                "The 'shuffle' and 'random_state' arguments are not " +
                "supported by this version of sklearn. See "
                "http://scikit-learn.org/stable/developers/index.html" +
                "#git-repo for details on installing the development version.")
            splitter = StratifiedKFold(y, n_folds=n_folds)
        super(StratifiedDatasetKFold, self).__init__(dataset, splitter,
                                                     **kwargs)
class DatasetShuffleSplit(DatasetCV):
    """
    Shuffle-split cross-validation over a dataset.

    Parameters
    ----------
    dataset : object
        Dataset to partition.
    n_iter : int
        Number of shuffle-split iterations.
    test_size : float, int, or None
        Test-set fraction (float), absolute size (int), or the
        complement of train_size (None).
    train_size : float, int, or None
        Training-set fraction (float), absolute size (int), or the
        complement of test_size (None).
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None, **kwargs):
        n_examples = dataset.X.shape[0]
        splitter = ShuffleSplit(n_examples, n_iter=n_iter,
                                test_size=test_size, train_size=train_size,
                                random_state=random_state)
        super(DatasetShuffleSplit, self).__init__(dataset, splitter, **kwargs)
class StratifiedDatasetShuffleSplit(StratifiedDatasetCV):
    """
    Stratified shuffle-split cross-validation over a dataset (class
    proportions preserved in each split).

    Parameters
    ----------
    dataset : object
        Dataset to partition.
    n_iter : int
        Number of shuffle-split iterations.
    test_size : float, int, or None
        Test-set fraction (float), absolute size (int), or the
        complement of train_size (None).
    train_size : float, int, or None
        Training-set fraction (float), absolute size (int), or the
        complement of test_size (None).
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None, **kwargs):
        y = self.get_y(dataset)
        splitter = StratifiedShuffleSplit(y, n_iter=n_iter,
                                          test_size=test_size,
                                          train_size=train_size,
                                          random_state=random_state)
        super(StratifiedDatasetShuffleSplit, self).__init__(dataset, splitter,
                                                            **kwargs)
class DatasetValidationKFold(DatasetCV):
    """
    K-fold cross-validation yielding train/valid/test subsets.

    Parameters
    ----------
    dataset : object
        Dataset to partition.
    n_folds : int
        Number of cross-validation folds. Must be at least 3.
    shuffle : bool
        Whether to shuffle the data before splitting.
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        n_examples = dataset.X.shape[0]
        splitter = ValidationKFold(n_examples, n_folds, shuffle, random_state)
        super(DatasetValidationKFold, self).__init__(dataset, splitter,
                                                     **kwargs)
class StratifiedDatasetValidationKFold(StratifiedDatasetCV):
    """
    Stratified K-fold cross-validation yielding train/valid/test subsets.

    Parameters
    ----------
    dataset : object
        Dataset to partition.
    n_folds : int
        Number of cross-validation folds. Must be at least 3.
    shuffle : bool
        Whether to shuffle the data before splitting.
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        y = self.get_y(dataset)
        splitter = StratifiedValidationKFold(y, n_folds, shuffle, random_state)
        super(StratifiedDatasetValidationKFold, self).__init__(dataset,
                                                               splitter,
                                                               **kwargs)
class DatasetValidationShuffleSplit(DatasetCV):
    """
    Shuffle-split cross-validation yielding train/valid/test subsets.

    Parameters
    ----------
    dataset : object
        Dataset to partition.
    n_iter : int
        Number of shuffle/split iterations.
    test_size : float, int, or None
        Test-set fraction (float), absolute size (int), or the
        complement of train_size + valid_size (None).
    valid_size : float, int, or None
        Validation-set fraction (float), absolute size (int), or
        the same value as test_size (None).
    train_size : float, int, or None
        Training-set fraction (float), absolute size (int), or the
        complement of valid_size + test_size (None).
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None,
                 train_size=None, random_state=None, **kwargs):
        n_examples = dataset.X.shape[0]
        splitter = ValidationShuffleSplit(n_examples, n_iter, test_size,
                                          valid_size, train_size,
                                          random_state)
        super(DatasetValidationShuffleSplit, self).__init__(dataset, splitter,
                                                            **kwargs)
class StratifiedDatasetValidationShuffleSplit(StratifiedDatasetCV):
    """
    Stratified shuffle-split cross-validation yielding train/valid/test
    subsets (class proportions preserved in each split).

    Parameters
    ----------
    dataset : object
        Dataset to partition.
    n_iter : int
        Number of shuffle/split iterations.
    test_size : float, int, or None
        Test-set fraction (float), absolute size (int), or the
        complement of train_size + valid_size (None).
    valid_size : float, int, or None
        Validation-set fraction (float), absolute size (int), or
        the same value as test_size (None).
    train_size : float, int, or None
        Training-set fraction (float), absolute size (int), or the
        complement of valid_size + test_size (None).
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None,
                 train_size=None, random_state=None, **kwargs):
        y = self.get_y(dataset)
        splitter = StratifiedValidationShuffleSplit(y, n_iter, test_size,
                                                    valid_size, train_size,
                                                    random_state)
        super(StratifiedDatasetValidationShuffleSplit, self).__init__(
            dataset, splitter, **kwargs)
| bsd-3-clause |
raghavrv/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 79 | 2849 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_equal, assert_true
def test_mmhash3_int():
    # Known-answer vectors for a small int.  The default and
    # ``positive=False`` variants must agree; ``positive=True`` only
    # changes the result when the signed hash is negative.
    for extra in ({}, {'positive': False}):
        assert_equal(murmurhash3_32(3, **extra), 847579505)
        assert_equal(murmurhash3_32(3, seed=0, **extra), 847579505)
        assert_equal(murmurhash3_32(3, seed=42, **extra), -1823081949)
    assert_equal(murmurhash3_32(3, positive=True), 847579505)
    assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
    assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
    # Vectorised hashing of an int32 array must match hashing each
    # element individually, for both signed and positive variants.
    rng = np.random.RandomState(42)
    keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
    keys = keys.reshape((3, 2, 1))
    for positive in (False, True):
        for seed in (0, 42):
            if positive:
                reference = [murmurhash3_32(k, seed, positive=True)
                             for k in keys.flat]
            else:
                reference = [murmurhash3_32(int(k), seed)
                             for k in keys.flat]
            expected = np.array(reference).reshape(keys.shape)
            assert_array_equal(
                murmurhash3_32(keys, seed, positive=positive), expected)
def test_mmhash3_bytes():
    # Known-answer vectors for byte-string input, per seed:
    # (seed, signed hash, positive hash).
    vectors = [(0, -156908512, 4138058784),
               (42, -1322301282, 2972666014)]
    for seed, signed, unsigned in vectors:
        assert_equal(murmurhash3_32(b('foo'), seed), signed)
        assert_equal(murmurhash3_32(b('foo'), seed, positive=True), unsigned)
def test_mmhash3_unicode():
    # Unicode input must hash identically to its ASCII byte equivalent
    # (same vectors as test_mmhash3_bytes).
    vectors = [(0, -156908512, 4138058784),
               (42, -1322301282, 2972666014)]
    for seed, signed, unsigned in vectors:
        assert_equal(murmurhash3_32(u('foo'), seed), signed)
        assert_equal(murmurhash3_32(u('foo'), seed, positive=True), unsigned)
def test_no_collision_on_byte_range():
    """Hashes of ``' ' * i`` for i in [0, 100) must all be distinct.

    Bug fix: the original loop never added ``h`` to ``previous_hashes``,
    so the membership check always ran against an empty set and the test
    could not detect a collision.
    """
    previous_hashes = set()
    for i in range(100):
        h = murmurhash3_32(' ' * i, 0)
        assert_true(h not in previous_hashes,
                    "Found collision on growing empty string")
        previous_hashes.add(h)
def test_uniform_distribution():
    # Hashes of consecutive ints, reduced modulo the bin count, should
    # land in each bin with roughly equal frequency.
    n_bins, n_samples = 10, 100000
    bins = np.zeros(n_bins, dtype=np.float64)
    for sample in range(n_samples):
        bins[murmurhash3_32(sample, positive=True) % n_bins] += 1
    observed = bins / n_samples
    expected = np.ones(n_bins) / n_bins
    assert_array_almost_equal(observed / expected, np.ones(n_bins), 2)
| bsd-3-clause |
B3AU/waveTree | sklearn/cluster/tests/test_spectral.py | 5 | 9160 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
from sklearn.metrics.pairwise import kernel_metrics
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances, adjusted_rand_score
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    # Block-diagonal affinity: two well-separated groups of 3 and 4 samples.
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
    # Every solver / label-assignment combination, on dense and sparse
    # input, must recover the two blocks exactly.
    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                # Cluster ids are arbitrary: normalise so sample 0 gets
                # label 1 before comparing.
                if labels[0] == 0:
                    labels = 1 - labels
                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
                # A fitted model must survive a pickle round-trip.
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_lobpcg_mode():
    # Test the lobpcg mode of SpectralClustering
    # We need a fairly big data matrix, as lobpcg does not work with
    # small data matrices
    centers = np.array([[0., 0.], [10., 10.]])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=.1, random_state=42)
    distances = pairwise_distances(X)
    # Turn distances into similarities by reflecting about the maximum.
    similarity = np.max(distances) - distances
    labels = spectral_clustering(similarity, n_clusters=len(centers),
                                 random_state=0, eigen_solver="lobpcg")
    # We don't care too much that it's good, just that it *worked*.
    # There does have to be some lower limit on the performance though.
    assert_greater(np.mean(labels == true_labels), .3)
def test_spectral_amg_mode():
    # Test the amg mode of SpectralClustering
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    # pyamg is an optional dependency: exercise the solver when present,
    # otherwise just check that requesting it raises ValueError.
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(S, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    # An unrecognised eigen_solver must raise ValueError.
    blob_centers = np.array([[0., 0., 0.],
                             [10., 10., 10.],
                             [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=blob_centers,
                                cluster_std=1., random_state=42)
    distances = pairwise_distances(X)
    affinity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, affinity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    # An unrecognised assign_labels strategy must raise ValueError.
    blob_centers = np.array([[0., 0., 0.],
                             [10., 10., 10.],
                             [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=blob_centers,
                                cluster_std=1., random_state=42)
    distances = pairwise_distances(X)
    affinity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, affinity, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    # We need a large matrice, or the lobpcg solver will fallback to its
    # non-sparse and buggy mode
    affinity = np.array([[1, 5, 2, 2, 1, 0, 0, 0, 0, 0],
                         [5, 1, 3, 2, 1, 0, 0, 0, 0, 0],
                         [2, 3, 1, 1, 1, 0, 0, 0, 0, 0],
                         [2, 2, 1, 1, 1, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 2, 1, 1, 1],
                         [0, 0, 0, 0, 1, 2, 2, 3, 3, 2],
                         [0, 0, 0, 0, 2, 2, 3, 3, 3, 4],
                         [0, 0, 0, 0, 1, 3, 3, 1, 2, 4],
                         [0, 0, 0, 0, 1, 3, 3, 2, 1, 4],
                         [0, 0, 0, 0, 1, 2, 4, 4, 4, 1],
                         ])
    labels = SpectralClustering(random_state=0, n_clusters=2,
                                affinity='precomputed'
                                ).fit(sparse.coo_matrix(affinity)).labels_
    # Normalise the arbitrary cluster ids before scoring.
    if labels[0] == 0:
        labels = 1 - labels
    assert_greater(np.mean(labels == [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]), .89)
def test_affinities():
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
    # default (rbf) affinity with a custom gamma
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
    # every named kernel must at least produce one label per sample
    X = check_random_state(10).rand(10, 5) * 10
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            print(labels)
            assert_equal((X.shape[0],), labels.shape)
    # a callable affinity is accepted as well
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    def histogram(x, y, **kwargs):
        """Histogram kernel implemented as a callable."""
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)
    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    """Check that discretize() recovers labels from a noisy indicator matrix.

    Modernisation with identical sampling: ``RandomState.random_integers``
    (deprecated, inclusive bounds) is ``randint(low, high + 1)`` internally,
    and ``np.float`` (removed in NumPy 1.24) was simply an alias for the
    builtin ``float``.
    """
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # random class labels in [0, n_class] inclusive
            y_true = random_state.randint(0, n_class + 1, n_samples)
            y_true = np.array(y_true, float)
            # noisy one-hot class-assignment matrix
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.todense()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
laurent-george/bokeh | examples/app/stock_applet/stock_app.py | 42 | 7786 | """
This file demonstrates a bokeh applet, which can either be viewed
directly on a bokeh-server, or embedded into a flask application.
See the README.md file in this directory for instructions on running.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
from os import listdir
from os.path import dirname, join, splitext
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource, Plot
from bokeh.plotting import figure, curdoc
from bokeh.properties import String, Instance
from bokeh.server.app import bokeh_app
from bokeh.server.utils.plugins import object_page
from bokeh.models.widgets import HBox, VBox, VBoxForm, PreText, Select
# build up list of stock data in the daily folder
data_dir = join(dirname(__file__), "daily")
try:
    tickers = listdir(data_dir)
except OSError as e:
    print('Stock data not available, see README for download instructions.')
    raise e
# File names look like "table_<ticker>.csv"; keep only the ticker symbol.
tickers = [splitext(x)[0].split("table_")[-1] for x in tickers]
def get_ticker_data(ticker):
    """Load one ticker's daily CSV as a date-indexed DataFrame.

    Returns two columns: ``<ticker>`` (closing price) and
    ``<ticker>_returns`` (day-over-day close difference).
    """
    fname = join(data_dir, "table_%s.csv" % ticker.lower())
    data = pd.read_csv(
        fname,
        names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
        # The files have no header row.  ``header=None`` is the documented
        # spelling; the original ``header=False`` is rejected by modern
        # pandas (old versions treated it the same as None).
        header=None,
        parse_dates=['date']
    )
    data = data.set_index('date')
    data = pd.DataFrame({ticker: data.c, ticker + "_returns": data.c.diff()})
    return data
def get_data(ticker1, ticker2):
    """Return the cached, NaN-free joined frame for a pair of tickers."""
    cached = pd_cache.get((ticker1, ticker2))
    if cached is not None:
        return cached
    if ticker1 != ticker2:
        # Distinct tickers: align the two frames on their date index.
        data = pd.concat([get_ticker_data(ticker1),
                          get_ticker_data(ticker2)], axis=1)
    else:
        # Same ticker twice: one load already carries every column needed.
        data = get_ticker_data(ticker1)
    data = data.dropna()
    pd_cache[(ticker1, ticker2)] = data
    return data
class StockApp(VBox):
    """Bokeh applet comparing two stock tickers.

    Shows a scatter of paired daily returns, two linked time-series plots,
    per-ticker return histograms and a text statistics panel, all driven by
    one shared ColumnDataSource so box-selection updates every view.
    """
    extra_generated_classes = [["StockApp", "StockApp", "VBox"]]
    jsmodel = "VBox"
    # text statistics
    pretext = Instance(PreText)
    # plots
    plot = Instance(Plot)
    line_plot1 = Instance(Plot)
    line_plot2 = Instance(Plot)
    hist1 = Instance(Plot)
    hist2 = Instance(Plot)
    # data source
    source = Instance(ColumnDataSource)
    # layout boxes
    mainrow = Instance(HBox)
    histrow = Instance(HBox)
    statsbox = Instance(VBox)
    # inputs
    ticker1 = String(default="AAPL")
    ticker2 = String(default="GOOG")
    ticker1_select = Instance(Select)
    ticker2_select = Instance(Select)
    input_box = Instance(VBoxForm)

    def __init__(self, *args, **kwargs):
        super(StockApp, self).__init__(*args, **kwargs)
        # per-instance frame cache (currently unused by the methods below)
        self._dfs = {}

    @classmethod
    def create(cls):
        """
        This function is called once, and is responsible for
        creating all objects (plots, datasources, etc)
        """
        # create layout widgets
        obj = cls()
        obj.mainrow = HBox()
        obj.histrow = HBox()
        obj.statsbox = VBox()
        obj.input_box = VBoxForm()
        # create input widgets
        obj.make_inputs()
        # outputs
        obj.pretext = PreText(text="", width=500)
        obj.make_source()
        obj.make_plots()
        obj.make_stats()
        # layout
        obj.set_children()
        return obj

    def make_inputs(self):
        # Drop-down ticker pickers; changes are routed to input_change().
        self.ticker1_select = Select(
            name='ticker1',
            value='AAPL',
            options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
        )
        self.ticker2_select = Select(
            name='ticker2',
            value='GOOG',
            options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
        )

    @property
    def selected_df(self):
        # Full frame restricted to the rows currently box-selected in the
        # plots; the whole frame when nothing is selected.
        pandas_df = self.df
        selected = self.source.selected['1d']['indices']
        if selected:
            pandas_df = pandas_df.iloc[selected, :]
        return pandas_df

    def make_source(self):
        # One shared source backs every plot so selections stay linked.
        self.source = ColumnDataSource(data=self.df)

    def line_plot(self, ticker, x_range=None):
        # Time-series scatter for one ticker; pass an existing x_range to
        # link panning/zooming across the two line plots.
        p = figure(
            title=ticker,
            x_range=x_range,
            x_axis_type='datetime',
            plot_width=1000, plot_height=200,
            title_text_font_size="10pt",
            tools="pan,wheel_zoom,box_select,reset"
        )
        p.circle(
            'date', ticker,
            size=2,
            source=self.source,
            nonselection_alpha=0.02
        )
        return p

    def hist_plot(self, ticker):
        # Histogram of the selected returns, drawn on the axis limits of
        # the *global* histogram so the view stays stable while selecting.
        global_hist, global_bins = np.histogram(self.df[ticker + "_returns"], bins=50)
        hist, bins = np.histogram(self.selected_df[ticker + "_returns"], bins=50)
        width = 0.7 * (bins[1] - bins[0])
        center = (bins[:-1] + bins[1:]) / 2
        start = global_bins.min()
        end = global_bins.max()
        top = hist.max()
        p = figure(
            title="%s hist" % ticker,
            plot_width=500, plot_height=200,
            tools="",
            title_text_font_size="10pt",
            x_range=[start, end],
            y_range=[0, top],
        )
        # rect centred at half-height draws a bar of the full bin count
        p.rect(center, hist / 2.0, width, hist)
        return p

    def make_plots(self):
        # Returns-vs-returns scatter plus the two linked time series.
        ticker1 = self.ticker1
        ticker2 = self.ticker2
        p = figure(
            title="%s vs %s" % (ticker1, ticker2),
            plot_width=400, plot_height=400,
            tools="pan,wheel_zoom,box_select,reset",
            title_text_font_size="10pt",
        )
        p.circle(ticker1 + "_returns", ticker2 + "_returns",
                 size=2,
                 nonselection_alpha=0.02,
                 source=self.source
                 )
        self.plot = p
        self.line_plot1 = self.line_plot(ticker1)
        self.line_plot2 = self.line_plot(ticker2, self.line_plot1.x_range)
        self.hist_plots()

    def hist_plots(self):
        ticker1 = self.ticker1
        ticker2 = self.ticker2
        self.hist1 = self.hist_plot(ticker1)
        self.hist2 = self.hist_plot(ticker2)

    def set_children(self):
        # Assemble the widget tree: inputs | scatter | stats on the first
        # row, the two histograms on the second, then the line plots.
        self.children = [self.mainrow, self.histrow, self.line_plot1, self.line_plot2]
        self.mainrow.children = [self.input_box, self.plot, self.statsbox]
        self.input_box.children = [self.ticker1_select, self.ticker2_select]
        self.histrow.children = [self.hist1, self.hist2]
        self.statsbox.children = [self.pretext]

    def input_change(self, obj, attrname, old, new):
        # Ticker changed: rebuild source, plots and layout, then re-register
        # the document so the client picks up the new objects.
        if obj == self.ticker2_select:
            self.ticker2 = new
        if obj == self.ticker1_select:
            self.ticker1 = new
        self.make_source()
        self.make_plots()
        self.set_children()
        curdoc().add(self)

    def setup_events(self):
        # Wire widget/selection callbacks; guarded because this runs before
        # create() has populated the attributes.
        super(StockApp, self).setup_events()
        if self.source:
            self.source.on_change('selected', self, 'selection_change')
        if self.ticker1_select:
            self.ticker1_select.on_change('value', self, 'input_change')
        if self.ticker2_select:
            self.ticker2_select.on_change('value', self, 'input_change')

    def make_stats(self):
        # Summary statistics (describe()) of the current selection.
        stats = self.selected_df.describe()
        self.pretext.text = str(stats)

    def selection_change(self, obj, attrname, old, new):
        # Box-selection changed: refresh stats and histograms only.
        self.make_stats()
        self.hist_plots()
        self.set_children()
        curdoc().add(self)

    @property
    def df(self):
        # Joined (and module-level cached) frame for the current pair.
        return get_data(self.ticker1, self.ticker2)
# The following code adds a "/bokeh/stocks/" url to the bokeh-server. This URL
# will render this StockApp. If you don't want serve this applet from a Bokeh
# server (for instance if you are embedding in a separate Flask application),
# then just remove this block of code.
@bokeh_app.route("/bokeh/stocks/")
@object_page("stocks")
def make_stocks():
    # Entry point for the bokeh-server plugin: build a fresh applet
    # instance for each page view.
    app = StockApp.create()
    return app
| bsd-3-clause |
logpai/logparser | benchmark/LKE_benchmark.py | 1 | 5357 | #!/usr/bin/env python
import sys
sys.path.append('../')
from logparser import LKE, evaluator
import os
import pandas as pd
input_dir = '../logs/'  # The input directory of log file
output_dir = 'LKE_result/'  # The output directory of parsing results

# Per-dataset configuration: ``log_format`` describes the raw line layout,
# ``regex`` lists patterns masked out before parsing (IPs, block ids, ...),
# and ``split_threshold`` is the LKE clustering threshold tuned per dataset.
benchmark_settings = {
    'HDFS': {
        'log_file': 'HDFS/HDFS_2k.log',
        'log_format': '<Date> <Time> <Pid> <Level> <Component>: <Content>',
        'regex': [r'blk_-?\d+', r'(\d+\.){3}\d+(:\d+)?'],
        'split_threshold': 3
        },
    'Hadoop': {
        'log_file': 'Hadoop/Hadoop_2k.log',
        'log_format': '<Date> <Time> <Level> \[<Process>\] <Component>: <Content>',
        'regex': [r'(\d+\.){3}\d+'],
        'split_threshold': 2
        # 3
        },
    'Spark': {
        'log_file': 'Spark/Spark_2k.log',
        'log_format': '<Date> <Time> <Level> <Component>: <Content>',
        'regex': [r'(\d+\.){3}\d+', r'\b[KGTM]?B\b', r'([\w-]+\.){2,}[\w-]+'],
        'split_threshold': 5
        },
    'Zookeeper': {
        'log_file': 'Zookeeper/Zookeeper_2k.log',
        'log_format': '<Date> <Time> - <Level> \[<Node>:<Component>@<Id>\] - <Content>',
        'regex': [r'(/|)(\d+\.){3}\d+(:\d+)?'],
        'split_threshold': 20
        },
    'BGL': {
        'log_file': 'BGL/BGL_2k.log',
        'log_format': '<Label> <Timestamp> <Date> <Node> <Time> <NodeRepeat> <Type> <Component> <Level> <Content>',
        'regex': [r'core\.\d+'],
        'split_threshold': 30
        },
    'HPC': {
        'log_file': 'HPC/HPC_2k.log',
        'log_format': '<LogId> <Node> <Component> <State> <Time> <Flag> <Content>',
        'regex': [r'=\d+'],
        'split_threshold': 10
        },
    'Thunderbird': {
        'log_file': 'Thunderbird/Thunderbird_2k.log',
        'log_format': '<Label> <Timestamp> <Date> <User> <Month> <Day> <Time> <Location> <Component>(\[<PID>\])?: <Content>',
        'regex': [r'(\d+\.){3}\d+'],
        'split_threshold': 2
        },
    'Windows': {
        'log_file': 'Windows/Windows_2k.log',
        'log_format': '<Date> <Time>, <Level> <Component> <Content>',
        'regex': [r'0x.*?\s'],
        'split_threshold': 4
        },
    'Linux': {
        'log_file': 'Linux/Linux_2k.log',
        'log_format': '<Month> <Date> <Time> <Level> <Component>(\[<PID>\])?: <Content>',
        'regex': [r'(\d+\.){3}\d+', r'\d{2}:\d{2}:\d{2}'],
        'split_threshold': 10
        },
    'Andriod': {
        'log_file': 'Andriod/Andriod_2k.log',
        'log_format': '<Date> <Time> <Pid> <Tid> <Level> <Component>: <Content>',
        'regex': [r'(/[\w-]+)+', r'([\w-]+\.){2,}[\w-]+', r'\b(\-?\+?\d+)\b|\b0[Xx][a-fA-F\d]+\b|\b[a-fA-F\d]{4,}\b'],
        'split_threshold': 260,
        },
    'HealthApp': {
        'log_file': 'HealthApp/HealthApp_2k.log',
        'log_format': '<Time>\|<Component>\|<Pid>\|<Content>',
        'regex': [],
        'split_threshold': 50,
        },
    'Apache': {
        'log_file': 'Apache/Apache_2k.log',
        'log_format': '\[<Time>\] \[<Level>\] <Content>',
        'regex': [r'(\d+\.){3}\d+'],
        'split_threshold': 5
        },
    'Proxifier': {
        'log_file': 'Proxifier/Proxifier_2k.log',
        'log_format': '\[<Time>\] <Program> - <Content>',
        'regex': [r'<\d+\ssec', r'([\w-]+\.)+[\w-]+(:\d+)?', r'\d{2}:\d{2}(:\d{2})*', r'[KGTM]B'],
        'split_threshold': 3
        },
    'OpenSSH': {
        'log_file': 'OpenSSH/OpenSSH_2k.log',
        'log_format': '<Date> <Day> <Time> <Component> sshd\[<Pid>\]: <Content>',
        'regex': [r'(\d+\.){3}\d+', r'([\w-]+\.){2,}[\w-]+'],
        'split_threshold': 100
        },
    'OpenStack': {
        'log_file': 'OpenStack/OpenStack_2k.log',
        'log_format': '<Logrecord> <Date> <Time> <Pid> <Level> <Component> \[<ADDR>\] <Content>',
        'regex': [r'((\d+\.){3}\d+,?)+', r'/.+?\s', r'\d+'],
        'split_threshold': 8
        },
    'Mac': {
        'log_file': 'Mac/Mac_2k.log',
        'log_format': '<Month> <Date> <Time> <User> <Component>\[<PID>\]( \(<Address>\))?: <Content>',
        'regex': [r'([\w-]+\.){2,}[\w-]+'],
        'split_threshold': 600
        }
    }
bechmark_result = []
# ``dict.items()`` works on both Python 2 and 3; the original
# ``iteritems()`` is Python-2 only and crashes under Python 3.
for dataset, setting in benchmark_settings.items():
    print('\n=== Evaluation on %s ===' % dataset)
    indir = os.path.join(input_dir, os.path.dirname(setting['log_file']))
    log_file = os.path.basename(setting['log_file'])
    # Parse the raw log into structured CSV with the per-dataset settings.
    parser = LKE.LogParser(log_format=setting['log_format'], indir=indir, outdir=output_dir, rex=setting['regex'],
                           split_threshold=setting['split_threshold'])
    parser.parse(log_file)
    # Score the parse against the hand-labelled ground truth.
    F1_measure, accuracy = evaluator.evaluate(
        groundtruth=os.path.join(indir, log_file + '_structured.csv'),
        parsedresult=os.path.join(output_dir, log_file + '_structured.csv')
    )
    bechmark_result.append([dataset, F1_measure, accuracy])

print('\n=== Overall evaluation results ===')
df_result = pd.DataFrame(bechmark_result, columns=['Dataset', 'F1_measure', 'Accuracy'])
df_result.set_index('Dataset', inplace=True)
print(df_result)
df_result.T.to_csv('LKE_bechmark_result.csv')
| mit |
bsipocz/statsmodels | examples/python/kernel_density.py | 33 | 1805 |
## Kernel Density Estimation
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.distributions.mixture_rvs import mixture_rvs
##### A univariate example.

# Reproducible sampling for all mixtures below.
np.random.seed(12345)

# Bimodal mixture: 25% N(-1, 0.5) + 75% N(1, 0.5).
obs_dist1 = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.norm],
                kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))

kde = sm.nonparametric.KDEUnivariate(obs_dist1)
kde.fit()

fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
# NOTE(review): ``normed=True`` was removed from matplotlib >= 3.1;
# newer code should pass ``density=True`` instead.
ax.hist(obs_dist1, bins=50, normed=True, color='red')
ax.plot(kde.support, kde.density, lw=2, color='black');

# Mixture of a Gaussian and a Beta distribution.
obs_dist2 = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],
            kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))

kde2 = sm.nonparametric.KDEUnivariate(obs_dist2)
kde2.fit()

fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.hist(obs_dist2, bins=50, normed=True, color='red')
ax.plot(kde2.support, kde2.density, lw=2, color='black');

# The fitted KDE object is a full non-parametric distribution.
obs_dist3 = mixture_rvs([.25,.75], size=1000, dist=[stats.norm, stats.norm],
                kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
kde3 = sm.nonparametric.KDEUnivariate(obs_dist3)
kde3.fit()

# Scalar summaries / point evaluation of the fitted density.
kde3.entropy
kde3.evaluate(-1)

##### CDF
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde3.support, kde3.cdf);

##### Cumulative Hazard Function
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde3.support, kde3.cumhazard);

##### Inverse CDF
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde3.support, kde3.icdf);

##### Survival Function
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde3.support, kde3.sf);
| bsd-3-clause |
ypid/series60-remote | pc/widget/StatisticCanvas.py | 1 | 2637 | # -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2009 Lukas Hetzenecker <LuHe@gmx.at>
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# Matplotlib
# Matplotlib is optional: when it is missing, the fallback QLabel-based
# StatisticCanvas defined further down is used instead.
try:
    from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
    from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as NavigationToolbar
    from matplotlib.figure import Figure
except ImportError:
    USE_MATPLOTLIB = False
else:
    USE_MATPLOTLIB= True
if USE_MATPLOTLIB:
    class StatisticCanvas(FigureCanvas):
        # Qt widget embedding a single-axes matplotlib figure.
        def __init__(self, parent=None, width = 10, height = 1.7, dpi = 100, sharex = None, sharey = None):
            self.fig = Figure(figsize = (width, height), dpi=dpi, facecolor = '#FFFFFF')
            self.ax = self.fig.add_subplot(111, sharex = sharex, sharey = sharey)
            self.fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9)
            #self.xtitle="x-Axis"
            # NOTE(review): using str(self) as the translation source string
            # looks suspicious -- presumably a fixed label was intended;
            # confirm against the callers.
            self.ytitle= QApplication.translate("All Messages", str(self))
            #self.PlotTitle = "Some Plot"
            self.grid_status = True
            self.xaxis_style = 'linear'
            self.yaxis_style = 'linear'
            #self.format_labels()
            #self.ax.hold(True)
            FigureCanvas.__init__(self, self.fig)
            FigureCanvas.setSizePolicy(self,
                                       QSizePolicy.Expanding,
                                       QSizePolicy.Expanding)
            FigureCanvas.updateGeometry(self)

        def format_labels(self):
            # Shrink tick and axis label fonts so the compact canvas
            # (default height 1.7") stays readable.
            #self.ax.set_title(self.PlotTitle)
            #self.ax.title.set_fontsize(10)
            #self.ax.set_xlabel(self.xtitle, fontsize = 9)
            self.ax.set_ylabel(self.ytitle, fontsize = 9)
            labels_x = self.ax.get_xticklabels()
            labels_y = self.ax.get_yticklabels()
            for xlabel in labels_x:
                xlabel.set_fontsize(8)
            for ylabel in labels_y:
                ylabel.set_fontsize(8)
                ylabel.set_color('b')

        #def sizeHint(self):
        #    w, h = self.get_width_height()
        #    return QSize(w, h)

        def minimumSizeHint(self):
            return QSize(10, 10)
else:
    class StatisticCanvas(QLabel):
        # Fallback widget shown when matplotlib is not installed.
        def __init__(self, parent=None):
            super(StatisticCanvas, self).__init__(parent)
            self.setText(self.tr("Matplotlib not found - Please install it."))
| gpl-2.0 |
kakaba2009/MachineLearning | python/src/mylib/mlstm.py | 1 | 9814 | import math
import os.path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import src.mylib.mfile as mfile
import src.mylib.mcalc as mcalc
from matplotlib import style
from keras.utils import np_utils
from keras.optimizers import Adam, RMSprop
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential, load_model
from keras.layers.normalization import BatchNormalization
from keras.layers import Dense, LSTM, Activation, Masking, TimeDistributed, Dropout
# convert an array of values into a dataset matrix
def create_dataset(dataset, seqn_size=1):
    """Slide a ``seqn_size``-row window over ``dataset``.

    X[i] is rows [i, i+seqn_size) flattened; Y[i] is the same window
    shifted one step ahead.  Both are returned as 2-D numpy arrays.
    """
    n_windows = dataset.shape[0] - seqn_size
    dataX = [dataset[i:i + seqn_size, :].flatten() for i in range(n_windows)]
    dataY = [dataset[i + 1:i + seqn_size + 1, :].flatten()
             for i in range(n_windows)]
    return np.array(dataX), np.array(dataY)
def create_dataset_class(dataset, seqn_size=1):
    """Windowed inputs with one-hot up/flat/down classification targets.

    X[i] is the window of rows [i, i+seqn_size); Y[i] one-hot encodes
    whether the value immediately after the window rose ([1,0,0]),
    stayed flat ([0,1,0]) or fell ([0,0,1]) relative to the window's
    last row.  Assumes a single feature column, as the comparisons do.
    """
    dataX, dataY = [], []
    for start in range(dataset.shape[0] - seqn_size):
        dataX.append(dataset[start:start + seqn_size, :])
        nxt = dataset[start + seqn_size, :]
        prev = dataset[start + seqn_size - 1, :]
        if nxt > prev:
            label = [1, 0, 0]
        elif nxt == prev:
            label = [0, 1, 0]
        else:
            label = [0, 0, 1]
        dataY.append(label)
    return np.array(dataX), np.array(dataY)
def printX_Y(X, Y):
    """Print each input/target pair as ``a -> b`` for quick inspection."""
    for idx, a in enumerate(X):
        print(a, "->", Y[idx])
def printX_YScaler(X, Y, scale, e=False):
    # Undo the scaling (and optional log transform when ``e`` is True)
    # before printing, so pairs are shown in original units.
    X = inverse_transform(X, scale, e)
    Y = inverse_transform(Y, scale, e)
    printX_Y(X, Y)
def loadExample():
    # Classic airline-passengers toy series; only the passenger-count
    # column (index 1) is read.  The path is relative to the CWD.
    dataframe = pd.read_csv('db/international-airline-passengers.csv', usecols=[1], engine='python')
    dataset = dataframe.values
    return dataset
def saveModel(model, filepath):
    # Persist the full Keras model (architecture + weights) to one file.
    model.save(filepath)
def loadModel(filepath,batch_size,iShape,loss='categorical_crossentropy',opt='adam',stack=1,state=False,od=1,act='softmax',neurons=8):
    # Load a previously saved model if ``filepath`` exists, otherwise
    # build a fresh one via createModel() with the given hyperparameters.
    if(os.path.exists(filepath)):
        # returns a compiled model
        # identical to the previous one
        model = load_model(filepath)
        # Stateful LSTMs carry hidden state across batches; clear it so a
        # reloaded model starts from a clean slate.
        if(state == True):
            model.reset_states()
        print(model.summary())
    else:
        model = createModel(batch_size, iShape, loss, opt, stack, state, od, act, neurons)
        jsonModel = model.to_json()
        print(jsonModel)
    return model
def createModel(batch_size,iShape,obj='mean_squared_error',opt='adam',stack=1,state=False,od=1,act='softmax',neurons=8):
    # Build and compile a (possibly stacked) Keras 1.x LSTM network:
    # ``stack`` LSTM layers (all but the last returning sequences), each
    # followed by dropout, then a Dense head of width ``od`` with
    # activation ``act``.  Stateful layers need the fixed batch_size in
    # their input shape.
    model = Sequential()
    #model.add(BatchNormalization(batch_input_shape=(batch_size, iShape[1], iShape[2])))
    #model.add(Masking([0, 0, 0]))
    #shape input to be [samples, time steps, features]
    for i in range(stack):
        if(i == (stack -1)):
            # Last LSTM layer: collapse the sequence to a single vector.
            if(state == True):
                model.add(LSTM(output_dim=neurons, batch_input_shape=(batch_size, iShape[1], iShape[2]), stateful=state, return_sequences=False))
            else:
                model.add(LSTM(output_dim=neurons, input_dim=iShape[2], stateful=state, return_sequences=False))
            print("Added LSTM Layer@", i, "return_sequences=False")
        else:
            # Intermediate LSTM layers must emit full sequences so the
            # next LSTM layer has a time axis to consume.
            if(state == True):
                model.add(LSTM(output_dim=neurons, batch_input_shape=(batch_size, iShape[1], iShape[2]), stateful=state, return_sequences=True))
            else:
                model.add(LSTM(output_dim=neurons, input_dim=iShape[2], stateful=state, return_sequences=True))
            print("Added LSTM Layer@", i, "return_sequences=True")
        model.add(Dropout(0.2))
    model.add(Dense(output_dim=od))
    #model.add(TimeDistributed(Dense(output_dim=od)))
    print("Added Dense Layer")
    model.add(Activation(act))
    print("Added Activation Layer")
    print(model.summary())
    # Replace the string optimizer with a configured instance (lr=0.001);
    # any other string is passed through to Keras unchanged.
    if(opt == "adam"):
        opt = Adam(lr=0.001)
    elif(opt == "RMSProp"):
        opt = RMSprop(lr=0.001)
    model.compile(loss=obj, optimizer=opt, metrics=["accuracy"])
    return model
def updateModel(model, opt):
    # Recompile in place with a new optimizer (loss fixed to MSE here).
    model.compile(loss='mean_squared_error', optimizer=opt, metrics=["accuracy"])
def minmaxScaler():
    # normalize the dataset
    scaler = MinMaxScaler(feature_range=(0, 1))
    return scaler
def normalize(dataset, scale=1, e=False):
    """Scale the data down by ``scale``.

    ``e`` is accepted for signature symmetry with inverse_transform()
    but is not used here.
    """
    return dataset / scale
def inverse_transform(matrix, scale=1, e=False):
    """Undo normalize(): multiply by ``scale`` and, when ``e`` is True,
    exponentiate to undo a prior log transform."""
    restored = matrix * scale
    return np.exp(restored) if e else restored
def log_scale(X, Y):
    """Return the element-wise natural log of both arrays."""
    return np.log(X), np.log(Y)
def exp_scale(X, Y):
    """Return the element-wise exponential of both arrays (inverse of
    log_scale)."""
    return np.exp(X), np.exp(Y)
def printScore(model, X, Y):
    # Print the Keras evaluation metrics (loss + accuracy) on (X, Y).
    #calculate root mean squared error
    #scores = math.sqrt(mean_squared_error(X, Y))
    scores = model.evaluate(X, Y, verbose=0)
    print('Evaluate Score:', scores)
def plot_result_1F(dataset, train, test, seqn_size):
    # Scatter the single-feature series (blue) with train predictions
    # (green, offset by the window size) and test predictions (red,
    # aligned to the tail of the series).
    #input array shape is (, 1)
    style.use('ggplot')
    # shift train predictions for plotting
    trainPr = np.empty_like(dataset)
    trainPr[:, :] = np.nan
    trainPr[seqn_size:len(train)+seqn_size, :] = train
    # shift test predictions for plotting
    testPre = np.empty_like(dataset)
    testPre[:, :] = np.nan
    testPre[-1*len(test):, :] = test
    # plot baseline and predictions
    X1 = np.arange(dataset.shape[0])
    Y1 = dataset.flatten()
    plt.scatter(X1, Y1, color="blue")
    plt.scatter(np.arange(trainPr.shape[0]), trainPr.flatten(), color="green")
    plt.scatter(np.arange(testPre.shape[0]), testPre.flatten(), color="red")
    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()
    plt.show()
def trainModel(model, X, Y, batch_size, epochs, modelSaved, validation=None, cb=None):
    # Fit without shuffling (time-series data), resetting LSTM state after
    # the run, then checkpoint the model to ``modelSaved``.
    # NOTE(review): the outer loop runs exactly once (range(1)), so the
    # mid-training checkpoint branch below is currently dead code.
    for i in range(1):
        hist = model.fit(X, Y, nb_epoch=epochs, batch_size=batch_size, verbose=2, shuffle=False,
                         validation_split=0.0, validation_data=validation, callbacks=cb)
        print(hist.history)
        if(i > 1 and i % 2 == 0):
            saveModel(model, modelSaved)
            print("Model saved at middle point")
        model.reset_states()
        print("Model State Reset")
    saveModel(model, modelSaved)
    print("Model saved at complete point")
def lastInputSeq(X, lag):
    """Take the last ``lag`` rows of X, plus the same data reshaped into a
    single [samples, time steps, features] batch for an LSTM."""
    tail = X[-1 * lag:, :]
    batch = np.reshape(tail, (1, -1, lag))
    return tail, batch
def calUpDown(X):
    """Direction series: 1 where X rose vs the previous element, 0 where
    flat, -1 where it fell.  The first element is always 0."""
    Y = np.zeros_like(X)
    for i in range(1, len(X)):
        if X[i] > X[i - 1]:
            Y[i] = 1
        elif X[i] == X[i - 1]:
            Y[i] = 0
        else:
            Y[i] = -1
    return Y
def classMap():
    """Map a one-hot argmax index to its direction value: up, flat, down."""
    return {0: [1], 1: [0], 2: [-1]}
def inverseMap(X):
    """Decode one-hot class rows back to direction values (+1/0/-1).

    Bug fix: the original computed ``np.argmax(X)`` over the *whole*
    matrix on every iteration, so each row decoded to the same — and for
    multi-row inputs possibly out-of-range — flattened index.  The argmax
    must be taken per row.
    """
    # Index -> direction, mirroring classMap(); inlined so the decode has
    # no external dependency.
    mapping = {0: [1], 1: [0], 2: [-1]}
    Y = np.zeros((X.shape[0], 1))
    for i in range(X.shape[0]):
        index = np.argmax(X[i])
        Y[i] = mapping[index]
    print(Y)
    return Y
def to_class(X):
    # One-hot encode integer class labels via Keras' np_utils helper.
    Y = np_utils.to_categorical(X)
    return Y
def plot_all_feature(X, c):
    # Scatter every column of X against its row index, in colour ``c``.
    features = X.shape[1]
    for i in range(features):
        F = X[:,i]
        plt.scatter(np.arange(F.shape[0]), F, color=c)
def plot_result_2F(dataset, predict, test=None, seqn_size=1):
    """Plot every feature of the data (blue) and predictions (green),
    plus optional test predictions (red) aligned to the series tail.

    Bug fix: ``if(test != None)`` compares a numpy array element-wise
    with None, which makes the ``if`` raise "truth value of an array is
    ambiguous"; identity comparison ``is not None`` is required.
    """
    # input array shape is (, 2)
    style.use('ggplot')
    # plot baseline and predictions
    plot_all_feature(dataset, "blue")
    plot_all_feature(predict, "green")
    if test is not None:
        # shift test predictions to the end of the series for plotting
        testPre = np.empty_like(dataset)
        testPre[:, :] = np.nan
        testPre[-1 * len(test):, :] = test
        plot_all_feature(testPre, "red")
    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()
    plt.show()
def loadFXData(Symbol, db, num):
    # Load one symbol's series from the project database and keep only
    # the most recent ``num`` rows.
    # load the dataset
    dataframe = mfile.loadOneSymbol(Symbol, db)
    dataframe = mcalc.c_lastn(dataframe, num)
    return dataframe
def setupTrainTest(dataset, seqn_size, SCALE, LOG):
    # Split the series into train and a small held-out tail, then window
    # both via create_dataset().  SCALE/LOG are only used to print the
    # test pairs in original units.
    # split into train and test sets
    test_size = seqn_size + 5
    train_size = dataset.shape[0] - test_size
    train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
    # reshape into X=t and Y=t+1
    trainX, trainY = create_dataset(train, seqn_size)
    testX, testY = create_dataset(test, seqn_size)
    printX_YScaler(testX, testY, SCALE, LOG)
    return trainX, trainY, testX, testY
def plot_results(predicted_data, true_data):
    # Line plot of the true series against a single prediction series.
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Value')
    plt.plot(predicted_data, label='Prediction')
    plt.legend()
    plt.show()
def plot_results_multiple(predicted_data, true_data, prediction_len):
    # Plot several fixed-length prediction windows against the true
    # series, shifting each window to its start position.
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Value')
    #Pad the list of predictions to shift it in the graph to it's correct start
    for i, data in enumerate(predicted_data):
        padding = [None for p in range(i * prediction_len)]
        plt.plot(padding + data, label='Prediction')
        plt.legend()
    plt.show()
tochikuji/chainer-libDNN | examples/mnist/AE.py | 1 | 1210 | # example of Convolutional Auto-encoder with layer visualization
from libdnn import AutoEncoder
import chainer
import chainer.functions as F
import chainer.optimizers as Opt
import numpy
from sklearn.datasets import fetch_mldata
# Two-layer fully-connected autoencoder: 784 (28x28 pixels) -> 100 -> 784.
model = chainer.FunctionSet(
    fh1=F.Linear(28 ** 2, 100),
    fh3=F.Linear(100, 28 ** 2),
)
def forward(self, x, train):
    """Forward pass of the autoencoder.

    During training the input is corrupted with 40% dropout
    (denoising-autoencoder style); hidden activations are also
    dropout-regularised via the ``train`` flag.
    """
    if train:
        x = F.dropout(x, ratio=0.4)
    # encoder: 784 -> 100 with sigmoid non-linearity
    h = F.dropout(F.sigmoid(self.model.fh1(x)), train=train)
    # decoder: 100 -> 784, linear output
    h = F.dropout(self.model.fh3(h), train=train)
    return h
ae = AutoEncoder(model, gpu=-1)  # gpu=-1: presumably CPU mode — confirm against libdnn docs
ae.set_forward(forward)
ae.set_optimizer(Opt.Adam)
# Fetch MNIST and scale pixel values into [0, 1].
mnist = fetch_mldata('MNIST original', data_home='.')
perm = numpy.random.permutation(len(mnist.data))
mnist.data = mnist.data.astype(numpy.float32) / 255
# Shuffled 60000/10000 train/test split.
train_data = mnist.data[perm][:60000]
test_data = mnist.data[perm][60000:]
for epoch in range(10):
    print('epoch : %d' % (epoch + 1))
    err = ae.train(train_data, batchsize=200)
    print(err)
    # Evaluate on a fresh random subsample of 100 test images each epoch.
    perm = numpy.random.permutation(len(test_data))
    terr = ae.test(test_data[perm][:100])
    print(terr)
    # Append "epoch train_err test_err" to the running log file.
    with open('ae.log', mode='a') as f:
        f.write("%d %f %f\n" % (epoch + 1, err, terr))
ae.save_param('ae.param.npy')
| mit |
kdebrab/pandas | asv_bench/benchmarks/reshape.py | 3 | 3829 | from itertools import product
import numpy as np
from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long
from .pandas_vb_common import setup # noqa
class Melt(object):
    """Benchmark ``pandas.melt`` on a frame with two id columns."""

    goal_time = 0.2

    def setup(self):
        frame = DataFrame(np.random.randn(10000, 3), columns=['A', 'B', 'C'])
        frame['id1'] = np.random.randint(0, 10, 10000)
        frame['id2'] = np.random.randint(100, 1000, 10000)
        self.df = frame

    def time_melt_dataframe(self):
        melt(self.df, id_vars=['id1', 'id2'])
class Pivot(object):
    """Benchmark pivoting a long time-series frame to wide format."""

    goal_time = 0.2

    def setup(self):
        # 50 variables, each observed at the same 10000 hourly timestamps.
        N = 10000
        index = date_range('1/1/2000', periods=N, freq='h')
        data = {'value': np.random.randn(N * 50),
                'variable': np.arange(50).repeat(N),
                'date': np.tile(index.values, 50)}
        self.df = DataFrame(data)

    def time_reshape_pivot_time_series(self):
        # Keyword arguments: positional index/columns/values were deprecated
        # in pandas 1.1 and removed in pandas 2.0; keywords work everywhere.
        self.df.pivot(index='date', columns='variable', values='value')
class SimpleReshape(object):
    """Benchmark stack/unstack on a two-level MultiIndex frame."""

    goal_time = 0.2

    def setup(self):
        level0 = np.arange(100).repeat(100)
        level1 = np.roll(np.tile(np.arange(100), 100), 25)
        index = MultiIndex.from_arrays([level0, level1])
        self.df = DataFrame(np.random.randn(10000, 4), index=index)
        self.udf = self.df.unstack(1)

    def time_stack(self):
        self.udf.stack()

    def time_unstack(self):
        self.df.unstack(1)
class Unstack(object):
    """Benchmark ``DataFrame.unstack`` on a full-product MultiIndex."""

    goal_time = 0.2

    def setup(self):
        m = 100
        n = 1000
        levels = np.arange(m)
        index = MultiIndex.from_product([levels] * 2)
        values = np.arange(m * m * n).reshape(m * m, n)
        self.df = DataFrame(values, index, np.arange(n))
        # Dropping one row makes the index an incomplete product,
        # exercising the slower unstack path.
        self.df2 = self.df.iloc[:-1]

    def time_full_product(self):
        self.df.unstack()

    def time_without_last_row(self):
        self.df2.unstack()
class SparseIndex(object):
    """Benchmark unstacking a frame with a sparse 5-level MultiIndex."""

    goal_time = 0.2

    def setup(self):
        n_rows = 1000
        rng = np.random
        frame = DataFrame({'A': rng.randint(50, size=n_rows),
                           'B': rng.randint(50, size=n_rows),
                           'C': rng.randint(-10, 10, size=n_rows),
                           'D': rng.randint(-10, 10, size=n_rows),
                           'E': rng.randint(10, size=n_rows),
                           'F': rng.randn(n_rows)})
        self.df = frame.set_index(['A', 'B', 'C', 'D', 'E'])

    def time_unstack(self):
        self.df.unstack()
class WideToLong(object):
    """Benchmark ``pandas.wide_to_long`` with many stub/year columns."""

    goal_time = 0.2

    def setup(self):
        nyrs = 20
        nidvars = 20
        N = 5000
        self.letters = list('ABCD')
        # Columns A1..A20, B1..B20, ... to be reshaped by stub name.
        yrvars = ['%s%d' % pair
                  for pair in product(self.letters, range(1, nyrs + 1))]
        columns = [str(i) for i in range(nidvars)] + yrvars
        self.df = DataFrame(np.random.randn(N, nidvars + len(yrvars)),
                            columns=columns)
        self.df['id'] = self.df.index

    def time_wide_to_long_big(self):
        wide_to_long(self.df, self.letters, i='id', j='year')
class PivotTable(object):
    """Benchmark ``DataFrame.pivot_table`` over object-dtype keys."""

    goal_time = 0.2

    def setup(self):
        N = 100000
        three = np.array(['A', 'B', 'C'], dtype='O')
        two = np.array(['one', 'two'], dtype='O')
        pick3 = np.random.randint(0, 3, size=N)
        pick2 = np.random.randint(0, 2, size=N)
        self.df = DataFrame({'key1': three.take(pick3),
                             'key2': two.take(pick2),
                             'key3': two.take(pick2),
                             'value1': np.random.randn(N),
                             'value2': np.random.randn(N),
                             'value3': np.random.randn(N)})

    def time_pivot_table(self):
        self.df.pivot_table(index='key1', columns=['key2', 'key3'])
| bsd-3-clause |
decvalts/cartopy | lib/cartopy/tests/mpl/test_images.py | 1 | 6074 | # (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import os
import types
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import pytest
import shapely.geometry as sgeom
from cartopy import config
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
from cartopy.tests.mpl import MPL_VERSION, ImageTesting
import cartopy.tests.test_img_tiles as ctest_tiles
# Repository sample rasters used as image fixtures by the tests below.
NATURAL_EARTH_IMG = os.path.join(config["repo_data_dir"],
                                 'raster', 'natural_earth',
                                 '50-natural-earth-1-downsampled.png')
REGIONAL_IMG = os.path.join(config['repo_data_dir'], 'raster', 'sample',
                            'Miriam.A2012270.2050.2km.jpg')
# We have an exceptionally large tolerance for the web_tiles test.
# The basemap changes on a regular basis (for seasons) and we really only
# care that it is putting images onto the map which are roughly correct.
@pytest.mark.natural_earth
@pytest.mark.network
@ImageTesting(['web_tiles'],
              tolerance=12 if MPL_VERSION < '2' else 2.9)
def test_web_tiles():
    # Draw the same North-Atlantic domain from three tile sources
    # (Google, quadtree, OSM) into a 2x2 grid of GeoAxes.
    extent = [-15, 0.1, 50, 60]
    target_domain = sgeom.Polygon([[extent[0], extent[1]],
                                   [extent[2], extent[1]],
                                   [extent[2], extent[3]],
                                   [extent[0], extent[3]],
                                   [extent[0], extent[1]]])
    map_prj = cimgt.GoogleTiles().crs
    ax = plt.subplot(2, 2, 1, projection=map_prj)
    gt = cimgt.GoogleTiles()
    # Swap in the replacement tile URL (presumably to avoid hitting the
    # live Google service — see cartopy.tests.test_img_tiles).
    gt._image_url = types.MethodType(ctest_tiles.GOOGLE_IMAGE_URL_REPLACEMENT,
                                     gt)
    img, extent, origin = gt.image_for_domain(target_domain, 1)
    ax.imshow(np.array(img), extent=extent, transform=gt.crs,
              interpolation='bilinear', origin=origin)
    ax.coastlines(color='white')
    ax = plt.subplot(2, 2, 2, projection=map_prj)
    qt = cimgt.QuadtreeTiles()
    img, extent, origin = qt.image_for_domain(target_domain, 1)
    ax.imshow(np.array(img), extent=extent, transform=qt.crs,
              interpolation='bilinear', origin=origin)
    ax.coastlines(color='white')
    ax = plt.subplot(2, 2, 3, projection=map_prj)
    osm = cimgt.OSM()
    img, extent, origin = osm.image_for_domain(target_domain, 1)
    ax.imshow(np.array(img), extent=extent, transform=osm.crs,
              interpolation='bilinear', origin=origin)
    ax.coastlines()
@pytest.mark.natural_earth
@pytest.mark.network
@ImageTesting(['image_merge'],
              tolerance=3.6 if MPL_VERSION < '2' else 0)
def test_image_merge():
    """Merge a 3x3 patch of zoom-2 web tiles and draw it on a global map."""
    gt = cimgt.GoogleTiles()
    # Swap in the replacement tile URL used throughout the test suite.
    gt._image_url = types.MethodType(ctest_tiles.GOOGLE_IMAGE_URL_REPLACEMENT,
                                     gt)
    images_to_merge = []
    for i in range(1, 4):
        for j in range(3):
            tile_img, tile_extent, tile_origin = gt.get_image((i, j, 2))
            tile_img = np.array(tile_img)
            xs = np.linspace(tile_extent[0], tile_extent[1],
                             tile_img.shape[1], endpoint=False)
            ys = np.linspace(tile_extent[2], tile_extent[3],
                             tile_img.shape[0], endpoint=False)
            images_to_merge.append([tile_img, xs, ys, tile_origin])
    img, extent, origin = cimgt._merge_tiles(images_to_merge)
    ax = plt.axes(projection=gt.crs)
    ax.set_global()
    ax.coastlines()
    plt.imshow(img, origin=origin, extent=extent, alpha=0.5)
@ImageTesting(['imshow_natural_earth_ortho'],
              tolerance=3.96 if MPL_VERSION < '2' else 0.7)
def test_imshow():
    """imshow of a global uint8 image onto an Orthographic projection."""
    img = plt.imread(NATURAL_EARTH_IMG)
    # imread gives floats for PNGs; convert to the byte form a JPG
    # would have been loaded with.
    img = (img * 255).astype('uint8')
    ax = plt.axes(projection=ccrs.Orthographic())
    ax.imshow(img, origin='upper', transform=ccrs.PlateCarree(),
              extent=[-180, 180, -90, 90])
@pytest.mark.natural_earth
@ImageTesting(['imshow_regional_projected'],
              tolerance=10.4 if MPL_VERSION < '2' else 0)
def test_imshow_projected():
    """Reproject a regional PlateCarree JPG onto a LambertConformal map."""
    data_crs = ccrs.PlateCarree()
    img_extent = (-120.67660000000001, -106.32104523100001,
                  13.2301484511245, 30.766899999999502)
    img = plt.imread(REGIONAL_IMG)
    ax = plt.axes(projection=ccrs.LambertConformal())
    ax.set_extent(img_extent, crs=data_crs)
    ax.coastlines(resolution='50m')
    ax.imshow(img, extent=img_extent, origin='upper', transform=data_crs)
@ImageTesting(['imshow_natural_earth_ortho'],
              tolerance=4.15 if MPL_VERSION < '2' else 0.7)
def test_stock_img():
    """The built-in stock image should match the natural-earth reference."""
    geo_axes = plt.axes(projection=ccrs.Orthographic())
    geo_axes.stock_img()
@ImageTesting(['imshow_natural_earth_ortho'],
              tolerance=3.96 if MPL_VERSION < '2' else 0.7)
def test_pil_Image():
    """imshow should accept a PIL Image directly, not only an ndarray."""
    pil_img = Image.open(NATURAL_EARTH_IMG)
    ax = plt.axes(projection=ccrs.Orthographic())
    ax.imshow(pil_img, origin='upper', transform=ccrs.PlateCarree(),
              extent=[-180, 180, -90, 90])
@ImageTesting(['imshow_natural_earth_ortho'],
              tolerance=4.2 if MPL_VERSION < '2' else 0)
def test_background_img():
    """A named low-resolution background image renders like the reference."""
    geo_axes = plt.axes(projection=ccrs.Orthographic())
    geo_axes.background_img(name='ne_shaded', resolution='low')
| gpl-3.0 |
endolith/scipy | scipy/spatial/kdtree.py | 11 | 33807 | # Copyright Anne M. Archibald 2008
# Released under the scipy license
import numpy as np
import warnings
from .ckdtree import cKDTree, cKDTreeNode
# Public API of this module.
__all__ = ['minkowski_distance_p', 'minkowski_distance',
           'distance_matrix',
           'Rectangle', 'KDTree']
def minkowski_distance_p(x, y, p=2):
    """Compute the pth power of the L**p distance between two arrays.

    For efficiency, this function computes the L**p distance but does
    not extract the pth root. If `p` is 1 or infinity, this is equal to
    the actual L**p distance.

    Parameters
    ----------
    x : (M, K) array_like
        Input array.
    y : (N, K) array_like
        Input array.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.

    Examples
    --------
    >>> from scipy.spatial import minkowski_distance_p
    >>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
    array([2, 1])
    """
    x = np.asarray(x)
    y = np.asarray(y)
    # Promote both operands to at least float64 (the nominal return type of
    # this function, see gh-10262) without flattening complex input to float.
    common = np.promote_types(np.promote_types(x.dtype, y.dtype), 'float64')
    diff = np.abs(y.astype(common) - x.astype(common))
    if p == np.inf:
        return np.amax(diff, axis=-1)
    if p == 1:
        return np.sum(diff, axis=-1)
    return np.sum(diff ** p, axis=-1)
def minkowski_distance(x, y, p=2):
    """Compute the L**p distance between two arrays.

    Parameters
    ----------
    x : (M, K) array_like
        Input array.
    y : (N, K) array_like
        Input array.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.

    Examples
    --------
    >>> from scipy.spatial import minkowski_distance
    >>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
    array([ 1.41421356,  1.        ])
    """
    x = np.asarray(x)
    y = np.asarray(y)
    dist_p = minkowski_distance_p(x, y, p)
    # For p = 1 and p = inf the pth-power "distance" already is the distance.
    if p == 1 or p == np.inf:
        return dist_p
    return dist_p ** (1. / p)
class Rectangle:
    """Hyperrectangle class.

    Represents a Cartesian product of intervals.
    """

    def __init__(self, maxes, mins):
        """Construct a hyperrectangle from two opposite corners."""
        # Normalise componentwise so self.maxes >= self.mins no matter
        # which order the corners were supplied in.
        self.maxes = np.maximum(maxes, mins).astype(float)
        self.mins = np.minimum(maxes, mins).astype(float)
        self.m, = self.maxes.shape

    def __repr__(self):
        return "<Rectangle %s>" % list(zip(self.mins, self.maxes))

    def volume(self):
        """Total volume."""
        return np.prod(self.maxes - self.mins)

    def split(self, d, split):
        """Produce two hyperrectangles by splitting.

        In general, if you need to compute maximum and minimum
        distances to the children, it can be done more efficiently
        by updating the maximum and minimum distances to the parent.

        Parameters
        ----------
        d : int
            Axis to split hyperrectangle along.
        split : float
            Position along axis `d` to split at.
        """
        lower_maxes = np.copy(self.maxes)
        lower_maxes[d] = split
        upper_mins = np.copy(self.mins)
        upper_mins[d] = split
        return (Rectangle(self.mins, lower_maxes),
                Rectangle(upper_mins, self.maxes))

    def min_distance_point(self, x, p=2.):
        """
        Return the minimum distance between input and points in the
        hyperrectangle.

        Parameters
        ----------
        x : array_like
            Input.
        p : float, optional
            Input.
        """
        # Per-axis shortfall is zero wherever x lies inside the interval.
        shortfall = np.maximum(0, np.maximum(self.mins - x, x - self.maxes))
        return minkowski_distance(0, shortfall, p)

    def max_distance_point(self, x, p=2.):
        """
        Return the maximum distance between input and points in the hyperrectangle.

        Parameters
        ----------
        x : array_like
            Input array.
        p : float, optional
            Input.
        """
        return minkowski_distance(
            0, np.maximum(self.maxes - x, x - self.mins), p)

    def min_distance_rectangle(self, other, p=2.):
        """
        Compute the minimum distance between points in the two hyperrectangles.

        Parameters
        ----------
        other : hyperrectangle
            Input.
        p : float
            Input.
        """
        # Per-axis gap is zero wherever the two intervals overlap.
        gap = np.maximum(0, np.maximum(self.mins - other.maxes,
                                       other.mins - self.maxes))
        return minkowski_distance(0, gap, p)

    def max_distance_rectangle(self, other, p=2.):
        """
        Compute the maximum distance between points in the two hyperrectangles.

        Parameters
        ----------
        other : hyperrectangle
            Input.
        p : float, optional
            Input.
        """
        return minkowski_distance(
            0, np.maximum(self.maxes - other.mins, other.maxes - self.mins), p)
class KDTree(cKDTree):
"""kd-tree for quick nearest-neighbor lookup.
This class provides an index into a set of k-dimensional points
which can be used to rapidly look up the nearest neighbors of any
point.
Parameters
----------
data : array_like, shape (n,m)
The n data points of dimension m to be indexed. This array is
not copied unless this is necessary to produce a contiguous
array of doubles, and so modifying this data will result in
bogus results. The data are also copied if the kd-tree is built
with copy_data=True.
leafsize : positive int, optional
The number of points at which the algorithm switches over to
brute-force. Default: 10.
compact_nodes : bool, optional
If True, the kd-tree is built to shrink the hyperrectangles to
the actual data range. This usually gives a more compact tree that
is robust against degenerated input data and gives faster queries
at the expense of longer build time. Default: True.
copy_data : bool, optional
If True the data is always copied to protect the kd-tree against
data corruption. Default: False.
balanced_tree : bool, optional
If True, the median is used to split the hyperrectangles instead of
the midpoint. This usually gives a more compact tree and
faster queries at the expense of longer build time. Default: True.
boxsize : array_like or scalar, optional
Apply a m-d toroidal topology to the KDTree.. The topology is generated
by :math:`x_i + n_i L_i` where :math:`n_i` are integers and :math:`L_i`
is the boxsize along i-th dimension. The input data shall be wrapped
into :math:`[0, L_i)`. A ValueError is raised if any of the data is
outside of this bound.
Notes
-----
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
Attributes
----------
data : ndarray, shape (n,m)
The n data points of dimension m to be indexed. This array is
not copied unless this is necessary to produce a contiguous
array of doubles. The data are also copied if the kd-tree is built
with `copy_data=True`.
leafsize : positive int
The number of points at which the algorithm switches over to
brute-force.
m : int
The dimension of a single data-point.
n : int
The number of data points.
maxes : ndarray, shape (m,)
The maximum value in each dimension of the n data points.
mins : ndarray, shape (m,)
The minimum value in each dimension of the n data points.
size : int
The number of nodes in the tree.
"""
class node:
@staticmethod
def _create(ckdtree_node=None):
"""Create either an inner or leaf node, wrapping a cKDTreeNode instance"""
if ckdtree_node is None:
return KDTree.node(ckdtree_node)
elif ckdtree_node.split_dim == -1:
return KDTree.leafnode(ckdtree_node)
else:
return KDTree.innernode(ckdtree_node)
def __init__(self, ckdtree_node=None):
if ckdtree_node is None:
ckdtree_node = cKDTreeNode()
self._node = ckdtree_node
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return id(self) == id(other)
class leafnode(node):
@property
def idx(self):
return self._node.indices
@property
def children(self):
return self._node.children
class innernode(node):
def __init__(self, ckdtreenode):
assert isinstance(ckdtreenode, cKDTreeNode)
super().__init__(ckdtreenode)
self.less = KDTree.node._create(ckdtreenode.lesser)
self.greater = KDTree.node._create(ckdtreenode.greater)
@property
def split_dim(self):
return self._node.split_dim
@property
def split(self):
return self._node.split
@property
def children(self):
return self._node.children
@property
def tree(self):
if not hasattr(self, "_tree"):
self._tree = KDTree.node._create(super().tree)
return self._tree
def __init__(self, data, leafsize=10, compact_nodes=True, copy_data=False,
balanced_tree=True, boxsize=None):
data = np.asarray(data)
if data.dtype.kind == 'c':
raise TypeError("KDTree does not work with complex data")
# Note KDTree has different default leafsize from cKDTree
super().__init__(data, leafsize, compact_nodes, copy_data,
balanced_tree, boxsize)
def query(
self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf, workers=1):
"""Query the kd-tree for nearest neighbors.
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : int or Sequence[int], optional
Either the number of nearest neighbors to return, or a list of the
k-th nearest neighbors to return, starting from 1.
eps : nonnegative float, optional
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values distance ("Manhattan" distance).
2 is the usual Euclidean distance.
infinity is the maximum-coordinate-difference distance.
A large, finite p may cause a ValueError if overflow can occur.
distance_upper_bound : nonnegative float, optional
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
workers : int, optional
Number of workers to use for parallel processing. If -1 is given
all CPU threads are used. Default: 1.
.. versionadded:: 1.6.0
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If ``x`` has shape ``tuple+(self.m,)``, then ``d`` has shape
``tuple+(k,)``.
When k == 1, the last dimension of the output is squeezed.
Missing neighbors are indicated with infinite distances.
Hits are sorted by distance (nearest first).
.. deprecated:: 1.6.0
If ``k=None``, then ``d`` is an object array of shape ``tuple``,
containing lists of distances. This behavior is deprecated and
will be removed in SciPy 1.8.0, use ``query_ball_point``
instead.
i : integer or array of integers
The index of each neighbor in ``self.data``.
``i`` is the same shape as d.
Missing neighbors are indicated with ``self.n``.
Examples
--------
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> x, y = np.mgrid[0:5, 2:8]
>>> tree = KDTree(np.c_[x.ravel(), y.ravel()])
To query the nearest neighbours and return squeezed result, use
>>> dd, ii = tree.query([[0, 0], [2.1, 2.9]], k=1)
>>> print(dd, ii)
[2. 0.14142136] [ 0 13]
To query the nearest neighbours and return unsqueezed result, use
>>> dd, ii = tree.query([[0, 0], [2.1, 2.9]], k=[1])
>>> print(dd, ii)
[[2. ]
[0.14142136]] [[ 0]
[13]]
To query the second nearest neighbours and return unsqueezed result,
use
>>> dd, ii = tree.query([[0, 0], [2.1, 2.9]], k=[2])
>>> print(dd, ii)
[[2.23606798]
[0.90553851]] [[ 6]
[12]]
To query the first and second nearest neighbours, use
>>> dd, ii = tree.query([[0, 0], [2.1, 2.9]], k=2)
>>> print(dd, ii)
[[2. 2.23606798]
[0.14142136 0.90553851]] [[ 0 6]
[13 12]]
or, be more specific
>>> dd, ii = tree.query([[0, 0], [2.1, 2.9]], k=[1, 2])
>>> print(dd, ii)
[[2. 2.23606798]
[0.14142136 0.90553851]] [[ 0 6]
[13 12]]
"""
x = np.asarray(x)
if x.dtype.kind == 'c':
raise TypeError("KDTree does not work with complex data")
if k is None:
# k=None, return all neighbors
warnings.warn(
"KDTree.query with k=None is deprecated and will be removed "
"in SciPy 1.8.0. Use KDTree.query_ball_point instead.",
DeprecationWarning)
# Convert index query to a lists of distance and index,
# sorted by distance
def inds_to_hits(point, neighbors):
dist = minkowski_distance(point, self.data[neighbors], p)
hits = sorted([(d, i) for d, i in zip(dist, neighbors)])
return [d for d, i in hits], [i for d, i in hits]
x = np.asarray(x, dtype=np.float64)
inds = super().query_ball_point(
x, distance_upper_bound, p, eps, workers)
if isinstance(inds, list):
return inds_to_hits(x, inds)
dists = np.empty_like(inds)
for idx in np.ndindex(inds.shape):
dists[idx], inds[idx] = inds_to_hits(x[idx], inds[idx])
return dists, inds
d, i = super().query(x, k, eps, p, distance_upper_bound, workers)
if isinstance(i, int):
i = np.intp(i)
return d, i
def query_ball_point(self, x, r, p=2., eps=0, workers=1,
return_sorted=None, return_length=False):
"""Find all points within distance r of point(s) x.
Parameters
----------
x : array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
r : array_like, float
The radius of points to return, must broadcast to the length of x.
p : float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
A finite large p may cause a ValueError if overflow can occur.
eps : nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
workers : int, optional
Number of jobs to schedule for parallel processing. If -1 is given
all processors are used. Default: 1.
.. versionadded:: 1.6.0
return_sorted : bool, optional
Sorts returned indicies if True and does not sort them if False. If
None, does not sort single point queries, but does sort
multi-point queries which was the behavior before this option
was added.
.. versionadded:: 1.6.0
return_length: bool, optional
Return the number of points inside the radius instead of a list
of the indices.
.. versionadded:: 1.6.0
Returns
-------
results : list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
Notes
-----
If you have many points whose neighbors you want to find, you may save
substantial amounts of time by putting them in a KDTree and using
query_ball_tree.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 0:5]
>>> points = np.c_[x.ravel(), y.ravel()]
>>> tree = spatial.KDTree(points)
>>> sorted(tree.query_ball_point([2, 0], 1))
[5, 10, 11, 15]
Query multiple points and plot the results:
>>> import matplotlib.pyplot as plt
>>> points = np.asarray(points)
>>> plt.plot(points[:,0], points[:,1], '.')
>>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
... nearby_points = points[results]
... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
>>> plt.margins(0.1, 0.1)
>>> plt.show()
"""
x = np.asarray(x)
if x.dtype.kind == 'c':
raise TypeError("KDTree does not work with complex data")
return super().query_ball_point(
x, r, p, eps, workers, return_sorted, return_length)
def query_ball_tree(self, other, r, p=2., eps=0):
"""
Find all pairs of points between `self` and `other` whose distance is
at most r.
Parameters
----------
other : KDTree instance
The tree containing points to search against.
r : float
The maximum distance, has to be positive.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : list of lists
For each element ``self.data[i]`` of this tree, ``results[i]`` is a
list of the indices of its neighbors in ``other.data``.
Examples
--------
You can search all pairs of points between two kd-trees within a distance:
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> rng = np.random.default_rng()
>>> points1 = rng.random((15, 2))
>>> points2 = rng.random((15, 2))
>>> plt.figure(figsize=(6, 6))
>>> plt.plot(points1[:, 0], points1[:, 1], "xk", markersize=14)
>>> plt.plot(points2[:, 0], points2[:, 1], "og", markersize=14)
>>> kd_tree1 = KDTree(points1)
>>> kd_tree2 = KDTree(points2)
>>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
>>> for i in range(len(indexes)):
... for j in indexes[i]:
... plt.plot([points1[i, 0], points2[j, 0]],
... [points1[i, 1], points2[j, 1]], "-r")
>>> plt.show()
"""
return super().query_ball_tree(other, r, p, eps)
def query_pairs(self, r, p=2., eps=0, output_type='set'):
"""Find all pairs of points in `self` whose distance is at most r.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
output_type : string, optional
Choose the output container, 'set' or 'ndarray'. Default: 'set'
.. versionadded:: 1.6.0
Returns
-------
results : set or ndarray
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close. If output_type is 'ndarray', an ndarry is
returned instead of a set.
Examples
--------
You can search all pairs of points in a kd-tree within a distance:
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> rng = np.random.default_rng()
>>> points = rng.random((20, 2))
>>> plt.figure(figsize=(6, 6))
>>> plt.plot(points[:, 0], points[:, 1], "xk", markersize=14)
>>> kd_tree = KDTree(points)
>>> pairs = kd_tree.query_pairs(r=0.2)
>>> for (i, j) in pairs:
... plt.plot([points[i, 0], points[j, 0]],
... [points[i, 1], points[j, 1]], "-r")
>>> plt.show()
"""
return super().query_pairs(r, p, eps, output_type)
def count_neighbors(self, other, r, p=2., weights=None, cumulative=True):
"""Count how many nearby pairs can be formed.
Count the number of pairs ``(x1,x2)`` can be formed, with ``x1`` drawn
from ``self`` and ``x2`` drawn from ``other``, and where
``distance(x1, x2, p) <= r``.
Data points on ``self`` and ``other`` are optionally weighted by the
``weights`` argument. (See below)
This is adapted from the "two-point correlation" algorithm described by
Gray and Moore [1]_. See notes for further discussion.
Parameters
----------
other : KDTree
The other tree to draw points from, can be the same tree as self.
r : float or one-dimensional array of floats
The radius to produce a count for. Multiple radii are searched with
a single tree traversal.
If the count is non-cumulative(``cumulative=False``), ``r`` defines
the edges of the bins, and must be non-decreasing.
p : float, optional
1<=p<=infinity.
Which Minkowski p-norm to use.
Default 2.0.
A finite large p may cause a ValueError if overflow can occur.
weights : tuple, array_like, or None, optional
If None, the pair-counting is unweighted.
If given as a tuple, weights[0] is the weights of points in
``self``, and weights[1] is the weights of points in ``other``;
either can be None to indicate the points are unweighted.
If given as an array_like, weights is the weights of points in
``self`` and ``other``. For this to make sense, ``self`` and
``other`` must be the same tree. If ``self`` and ``other`` are two
different trees, a ``ValueError`` is raised.
Default: None
.. versionadded:: 1.6.0
cumulative : bool, optional
Whether the returned counts are cumulative. When cumulative is set
to ``False`` the algorithm is optimized to work with a large number
of bins (>10) specified by ``r``. When ``cumulative`` is set to
True, the algorithm is optimized to work with a small number of
``r``. Default: True
.. versionadded:: 1.6.0
Returns
-------
result : scalar or 1-D array
The number of pairs. For unweighted counts, the result is integer.
For weighted counts, the result is float.
If cumulative is False, ``result[i]`` contains the counts with
``(-inf if i == 0 else r[i-1]) < R <= r[i]``
Notes
-----
Pair-counting is the basic operation used to calculate the two point
correlation functions from a data set composed of position of objects.
Two point correlation function measures the clustering of objects and
is widely used in cosmology to quantify the large scale structure
in our Universe, but it may be useful for data analysis in other fields
where self-similar assembly of objects also occur.
The Landy-Szalay estimator for the two point correlation function of
``D`` measures the clustering signal in ``D``. [2]_
For example, given the position of two sets of objects,
- objects ``D`` (data) contains the clustering signal, and
- objects ``R`` (random) that contains no signal,
.. math::
\\xi(r) = \\frac{<D, D> - 2 f <D, R> + f^2<R, R>}{f^2<R, R>},
where the brackets represents counting pairs between two data sets
in a finite bin around ``r`` (distance), corresponding to setting
`cumulative=False`, and ``f = float(len(D)) / float(len(R))`` is the
ratio between number of objects from data and random.
The algorithm implemented here is loosely based on the dual-tree
algorithm described in [1]_. We switch between two different
pair-cumulation scheme depending on the setting of ``cumulative``.
The computing time of the method we use when for
``cumulative == False`` does not scale with the total number of bins.
The algorithm for ``cumulative == True`` scales linearly with the
number of bins, though it is slightly faster when only
1 or 2 bins are used. [5]_.
As an extension to the naive pair-counting,
weighted pair-counting counts the product of weights instead
of number of pairs.
Weighted pair-counting is used to estimate marked correlation functions
([3]_, section 2.2),
or to properly calculate the average of data per distance bin
(e.g. [4]_, section 2.1 on redshift).
.. [1] Gray and Moore,
"N-body problems in statistical learning",
Mining the sky, 2000,
https://arxiv.org/abs/astro-ph/0012333
.. [2] Landy and Szalay,
"Bias and variance of angular correlation functions",
The Astrophysical Journal, 1993,
http://adsabs.harvard.edu/abs/1993ApJ...412...64L
.. [3] Sheth, Connolly and Skibba,
"Marked correlations in galaxy formation models",
Arxiv e-print, 2005,
https://arxiv.org/abs/astro-ph/0511773
.. [4] Hawkins, et al.,
"The 2dF Galaxy Redshift Survey: correlation functions,
peculiar velocities and the matter density of the Universe",
Monthly Notices of the Royal Astronomical Society, 2002,
http://adsabs.harvard.edu/abs/2003MNRAS.346...78H
.. [5] https://github.com/scipy/scipy/pull/5647#issuecomment-168474926
Examples
--------
You can count neighbors number between two kd-trees within a distance:
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> rng = np.random.default_rng()
>>> points1 = rng.random((5, 2))
>>> points2 = rng.random((5, 2))
>>> kd_tree1 = KDTree(points1)
>>> kd_tree2 = KDTree(points2)
>>> kd_tree1.count_neighbors(kd_tree2, 0.2)
1
This number is same as the total pair number calculated by
`query_ball_tree`:
>>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
>>> sum([len(i) for i in indexes])
1
"""
return super().count_neighbors(other, r, p, weights, cumulative)
def sparse_distance_matrix(
        self, other, max_distance, p=2., output_type='dok_matrix'):
    """Compute a sparse distance matrix between two KDTrees.

    Any pair of points separated by more than ``max_distance`` is left
    out of the result, i.e. stored as an implicit zero.

    Parameters
    ----------
    other : KDTree
        The tree holding the second point set.
    max_distance : positive float
        Pairs farther apart than this are omitted.
    p : float, 1 <= p <= infinity, optional
        Which Minkowski p-norm to use.  A finite but very large `p`
        may cause a ValueError if overflow can occur.
    output_type : string, optional
        Output container: 'dok_matrix', 'coo_matrix', 'dict', or
        'ndarray'.  Default: 'dok_matrix'.

        .. versionadded:: 1.6.0

    Returns
    -------
    result : dok_matrix, coo_matrix, dict or ndarray
        Sparse representation of the pairwise distances.  For 'dict'
        the keys are ``(i, j)`` index tuples; for 'ndarray' a record
        array with fields 'i', 'j' and 'v' is returned.
    """
    # All real work happens in the (C-accelerated) parent class; this
    # override exists to carry the public-facing docstring.
    return super().sparse_distance_matrix(
        other, max_distance, p, output_type)
def distance_matrix(x, y, p=2, threshold=1000000):
    """Return the matrix of all pairwise distances between `x` and `y`.

    Parameters
    ----------
    x : (M, K) array_like
        M vectors, each of dimension K.
    y : (N, K) array_like
        N vectors, each of dimension K.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.
    threshold : positive int
        When ``M * N * K`` exceeds this, fall back to a Python loop
        instead of allocating one large temporary array.

    Returns
    -------
    result : (M, N) ndarray
        ``result[i, j]`` holds the distance from ``x[i]`` to ``y[j]``.

    Examples
    --------
    >>> from scipy.spatial import distance_matrix
    >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
    array([[ 1.        ,  1.41421356],
           [ 1.41421356,  1.        ]])
    """
    x = np.asarray(x)
    y = np.asarray(y)
    m, k = x.shape
    n, kk = y.shape
    if k != kk:
        raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))

    if m * n * k <= threshold:
        # Small enough: one broadcast call computes every pair at once.
        return minkowski_distance(x[:, np.newaxis, :], y[np.newaxis, :, :], p)

    # Large problem: fill the output incrementally, looping over
    # whichever side is shorter to minimize Python-level iterations.
    result = np.empty((m, n), dtype=float)  # FIXME: figure out the best dtype
    if m < n:
        for row in range(m):
            result[row, :] = minkowski_distance(x[row], y, p)
    else:
        for col in range(n):
            result[:, col] = minkowski_distance(x, y[col], p)
    return result
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.22/_downloads/e8440d4a71ce3cd53b39ebc6f55d87ec/plot_linear_regression_raw.py | 18 | 2385 | """
========================================
Regression on continuous data (rER[P/F])
========================================
This demonstrates how rER[P/F]s - regressing the continuous data - is a
generalisation of traditional averaging. If all preprocessing steps
are the same, no overlap between epochs exists, and if all
predictors are binary, regression is virtually identical to traditional
averaging.
If overlap exists and/or predictors are continuous, traditional averaging
is inapplicable, but regression can estimate effects, including those of
continuous predictors.
rERPs are described in:
Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
"""
# Authors: Jona Sassenhagen <jona.sassenhagen@gmail.de>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.stats.regression import linear_regression_raw
# Load and preprocess data.
# NOTE: sample.data_path() downloads the MNE sample dataset on first use.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname)
# Keep only gradiometers (plus the stim channel needed for event extraction).
raw.pick_types(meg='grad', stim=True, eeg=False).load_data()
raw.filter(1, None, fir_design='firwin')  # high-pass at 1 Hz
# Set up events: left/right auditory stimuli only.
events = mne.find_events(raw)
event_id = {'Aud/L': 1, 'Aud/R': 2}
tmin, tmax = -.1, .5
# Regular (traditional) epoching for comparison with the rERF below.
picks = mne.pick_types(raw.info, meg=True)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None,
                    baseline=None, preload=True, verbose=False)
# rERF: regression on the continuous data over the same time window.
evokeds = linear_regression_raw(raw, events=events, event_id=event_id,
                                reject=None, tmin=tmin, tmax=tmax)
# linear_regression_raw returns a dict of evokeds;
# select conditions similarly to mne.Epochs objects.
# Plot both results, and their difference.
cond = "Aud/L"
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
# Shared plotting options so the three panels are directly comparable.
params = dict(spatial_colors=True, show=False, ylim=dict(grad=(-200, 200)),
              time_unit='s')
epochs[cond].average().plot(axes=ax1, **params)
evokeds[cond].plot(axes=ax2, **params)
# With binary predictors and no overlap, the difference should be ~0.
contrast = mne.combine_evoked([evokeds[cond], epochs[cond].average()],
                              weights=[1, -1])
contrast.plot(axes=ax3, **params)
ax1.set_title("Traditional averaging")
ax2.set_title("rERF")
ax3.set_title("Difference")
plt.show()
| bsd-3-clause |
MikeDelaney/sentiment | skeleton.py | 1 | 2705 | import sys, os
import numpy as np
from operator import itemgetter as ig
from sklearn.linear_model import LogisticRegression as LR
from collections import Counter
import string
# vocab = [] # the features used in the classifier
# build vocabulary
def buildvocab(numwords):
    """Build the classifier vocabulary from the training corpus.

    Scans every file under ./pos and ./neg, counts word frequencies
    (lower-cased, whitespace-tokenised), and keeps the `numwords`
    most common words, excluding stopwords and single-character
    punctuation tokens.

    Parameters
    ----------
    numwords : int
        Number of vocabulary entries to keep.

    Returns
    -------
    list of str
        The `numwords` most frequent words, most frequent first.
    """
    temp_words = []
    base_dir = os.getcwd()
    training_dirs = [base_dir + '/' + dirname for dirname in ('pos', 'neg')]
    # Use a context manager so the stopword file is closed promptly
    # (the original leaked the handle), and a set for O(1) membership.
    with open('stopwords.txt') as stopfile:
        stopwords = set(stopfile.read().lower().split())
    # Populate vocab list with numwords most frequent words in training
    # data, minus stopwords.
    for training_dir in training_dirs:
        for training_file in os.listdir(training_dir):
            with open(training_dir + '/' + training_file) as f:
                words = f.read().lower().split()
            for word in words:
                # NOTE: `word not in string.punctuation` only filters
                # single-character punctuation tokens; multi-character
                # tokens such as '--' pass through (original behaviour).
                if word not in stopwords and word not in string.punctuation:
                    temp_words.append(word)
    # Counter.most_common returns (word, count) pairs, most frequent first.
    vocab = [item[0] for item in Counter(temp_words).most_common(numwords)]
    return vocab
def vectorize(file_name, vocab):
    """Convert a review file into a bag-of-words count vector.

    Parameters
    ----------
    file_name : str
        Path to the review text file.
    vocab : list of str
        Vocabulary; position i of the result counts occurrences of
        ``vocab[i]``.

    Returns
    -------
    numpy.ndarray
        Vector of length ``len(vocab)`` with per-word counts.
    """
    # Build a word -> index map once: dict lookup is O(1), whereas the
    # original vocab.index(word) rescanned the list for every token.
    # setdefault keeps the FIRST index for a duplicated word, matching
    # list.index semantics exactly.
    index_of = {}
    for i, word in enumerate(vocab):
        index_of.setdefault(word, i)
    vector = np.zeros(len(vocab))
    with open(file_name) as fn:
        tokens = fn.read().lower().split()
    for word in tokens:
        i = index_of.get(word)
        if i is not None:  # words outside the vocabulary are ignored
            vector[i] += 1
    return vector
def make_classifier(vocab):
    """Train a logistic-regression sentiment classifier.

    Reads every review under ./pos (label +1) and ./neg (label -1),
    vectorises each file with `vectorize`, and fits a
    LogisticRegression model on the resulting matrix.
    """
    base_dir = os.getcwd()
    review_vectors = []
    labels = []
    for dirname in ('pos', 'neg'):
        directory = base_dir + '/' + dirname
        label = 1 if dirname == 'pos' else -1
        for review_file in os.listdir(directory):
            review_vectors.append(
                vectorize(directory + '/' + review_file, vocab))
            labels.append(label)
    lr = LR()
    lr.fit(np.asarray(review_vectors), np.asarray(labels))
    return lr
def test_classifier(lr, vocab):
    """Evaluate `lr` on the files in ./test and print the tally.

    The expected label is decoded from each file name: the digit just
    before the first underscore maps 3 -> +1, anything else -> -1.
    Misclassified file names are printed, followed by the counts of
    right and wrong predictions.
    """
    filenames = []
    names = os.listdir('test')
    features = np.zeros((len(names), len(vocab)))
    truth = []
    for row, fn in enumerate(names):
        filenames.append(fn)
        features[row] = vectorize(os.path.join('test', fn), vocab)
        digit = int(fn.split('_')[0][-1])
        truth.append(1 if digit == 3 else -1)
    # The test set is expected to be perfectly balanced between classes.
    assert(sum(truth) == 0)
    predictions = lr.predict(features)
    right, wrong = 0, 0
    for row, predicted in enumerate(predictions):
        if predicted == truth[row]:
            right += 1
        else:
            wrong += 1
            print(filenames[row])
    print(right, wrong)
if __name__ == '__main__':
    # `raw_input` only exists on Python 2 and raises NameError on
    # Python 3; the rest of the file uses print() calls, so use the
    # Python 3 builtin `input` instead.
    numwords = int(input("\nEnter number of words for classifiers: "))
    vocab = buildvocab(numwords)
    lr = make_classifier(vocab)
    test_classifier(lr, vocab)
| mit |
BDannowitz/polymath-progression-blog | jlab-ml-lunch-2/src/jlab.py | 1 | 8171 | import re
from io import StringIO
import pandas as pd
import numpy as np
from math import floor
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Column names: 6 kinematic values (x, y, z, px, py, pz) for the origin
# plus the same 6 for each of the 24 detector planes, suffixed 1..24.
# Generated instead of hand-enumerated; identical to the original list.
COLS = [base + (str(plane) if plane else '')
        for plane in range(25)
        for base in ('x', 'y', 'z', 'px', 'py', 'pz')]
# z positions of the origin plane and the 24 detector planes.
# NOTE(review): units look like centimetres -- confirm against the dataset.
Z_VALS = [ 65. , 176.944, 179.069, 181.195, 183.32 , 185.445, 187.571,
          235.514, 237.639, 239.765, 241.89 , 244.015, 246.141, 294.103,
          296.228, 298.354, 300.479, 302.604, 304.73 , 332.778, 334.903,
          337.029, 339.154, 341.28 , 343.405]
# Gap from each plane to the next one (0.0 for the last plane).  Kept as
# explicit float literals so downstream arithmetic is bit-for-bit unchanged.
Z_DIST = [
    111.94400000000002, 2.125, 2.1259999999999764, 2.125, 2.125,
    2.1260000000000048, 47.94300000000001, 2.125, 2.1259999999999764,
    2.125, 2.125, 2.1260000000000048, 47.96200000000002,
    2.125, 2.126000000000033, 2.125,
    2.125, 2.1259999999999764, 28.048000000000002, 2.125,
    2.1259999999999764, 2.125, 2.1259999999999764, 2.125, 0.0
]
N_DETECTORS = 25   # origin plane + 24 measurement planes
N_KINEMATICS = 6   # x, y, z, px, py, pz
N_FEATURES = 13    # 6 kinematics + 7 engineered features (get_detector_meta)
def load_test_data(filename, cols=COLS):
    """Read the specific test file format into a dataframe.

    All this hullabaloo just to chop off the last number of each line.

    Parameters
    ----------
    filename : str
        Path to the raw test file.
    cols : list of str
        Column names assigned to the parsed CSV (default: COLS).

    Returns
    -------
    pandas.DataFrame
    """
    with open(filename, 'r') as f:
        data_str = f.read().replace(' ', '')
    # Insert ',,' before the final numeric token of every line, so the
    # trailing value is shifted into surplus columns that fall outside
    # `cols` and is effectively dropped by read_csv.
    # NOTE(review): this relies on the regex matching only the last
    # number before each newline -- confirm against a sample file.
    data_str_io = StringIO(
        re.sub(r"([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?\n)", r",,\1",
               data_str)
    )
    X_test = pd.read_csv(data_str_io, names=cols)
    return X_test
def load_train_test(frac):
    """Load training and validation data from disk.

    Parameters
    ----------
    frac : float
        Fraction of the training rows to keep (random subsample).

    Returns
    -------
    (numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray)
        X_train, X_test, y_train, y_test arrays ready for modelling.
    """
    # Subsample the raw training CSV and re-index from zero.
    train_df = pd.read_csv('MLchallenge2_training.csv')
    train_df = train_df.sample(frac=frac).reset_index(drop=True)
    # Test inputs use their own quirky format; truth values are plain CSV.
    test_df = load_test_data('test_in.csv')
    truth_df = pd.read_csv('test_prediction.csv',
                           names=['x', 'y', 'px', 'py', 'pz'],
                           header=None)
    X_train_array, y_train_array = train_to_time_series(train_df)
    X_test_array = test_to_time_series(test_df)
    y_test_array = truth_df.values
    return X_train_array, X_test_array, y_train_array, y_test_array
def get_detector_meta(kin_array, det_id):
    """Append engineered per-detector features to a kinematics vector.

    Takes the 6 raw kinematic values for detector `det_id` and returns
    them followed by 7 derived features: the detector id, a gap flag
    (2 if a large gap follows this plane, else 1; 0 is reserved for
    padded timesteps), the detector group (1-5, group 1 being the
    origin plane), the rank within the group (1-6), the z distance to
    the next plane, the transverse momentum, and the total momentum.
    """
    px, py, pz = kin_array[3], kin_array[4], kin_array[5]
    # Every sixth plane is followed by a large gap: flag 2, otherwise 1.
    gap_flag = 2 if det_id % 6 == 0 else 1
    group = floor((det_id - 1) / 6) + 2
    rank_in_group = ((det_id - 1) % 6) + 1
    gap_length = Z_DIST[det_id]
    transverse_p = np.sqrt(np.square(px) + np.square(py))
    total_p = np.sqrt(np.square(px) + np.square(py) + np.square(pz))
    extras = np.array([det_id, gap_flag, group, rank_in_group,
                       gap_length, transverse_p, total_p])
    # Raw kinematics first, engineered features after.
    return np.concatenate([kin_array, extras], axis=None)
def train_to_time_series(X):
    """Convert the training dataframe into padded multivariate series.

    Each row of `X` (one track) is reshaped into 25 timesteps of 6
    kinematic values.  The track is truncated at a random length of
    8-24 steps so it looks like the provided test set, and the step
    right after the cut becomes the regression target (x, y, px, py,
    pz -- z is skipped because it is fixed per plane).  Truncated
    sequences are front-padded with zeros.

    Parameters
    ----------
    X : pandas.DataFrame

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Padded training sequences and their regression targets.
    """
    n_samples = len(X)
    truncated_tracks = []
    targets = np.ndarray(shape=(n_samples, N_KINEMATICS - 1))
    for row in range(n_samples):
        # Randomly pick how many detectors the track was seen through.
        keep = np.random.choice(range(8, 25))
        track = X.iloc[row].values.reshape(N_DETECTORS, N_KINEMATICS)
        truncated_tracks.append(track[0:keep])
        # Target: the next step's kinematics, minus column 2 (z).
        targets[row] = track[keep][[0, 1, 3, 4, 5]]
    # Front-pad every sequence to a common length.
    padded = pad_sequences(truncated_tracks, dtype=float)
    return np.array(padded), targets
def test_to_time_series(X):
    """Convert the test dataframe into front-padded (24, 6) series.

    Each row is reshaped into 25 timesteps of 6 kinematic values and
    cut at the first plane whose values are missing (see
    `get_test_detector_plane`), then front-padded with zeros to a
    fixed length of 24 timesteps.

    Parameters
    ----------
    X : pandas.DataFrame

    Returns
    -------
    numpy.ndarray
        Shape is (len(X), 24, 6).
    """
    observed_tracks = []
    for row in range(len(X)):
        observed_len = get_test_detector_plane(X.iloc[row])
        track = X.iloc[row].values.reshape(N_DETECTORS, N_KINEMATICS)
        observed_tracks.append(track[0:observed_len])
    padded = pad_sequences(observed_tracks, maxlen=(N_DETECTORS - 1),
                           dtype=float)
    return np.array(padded)
def get_test_detector_plane(row):
    """Return the index of the first detector plane with missing data.

    Each plane contributes 6 consecutive values to `row`, so the
    position of the first NaN divided by 6 identifies the plane whose
    values must be predicted.  Surprisingly handy for various data
    wrangling operations.

    Parameters
    ----------
    row : pandas.Series

    Returns
    -------
    int
    """
    first_nan = np.where(np.isnan(row.values))[0][0]
    return int(first_nan / 6)
def plot_one_track_position(df, track_id):
    """Render one track's (x, y, z) hit positions as a 3D line plot.

    Parameters
    ----------
    df : pandas.DataFrame
        Track table in the COLS layout (6 values per plane).
    track_id : hashable
        Row label of the track to draw (passed to ``df.loc``).
    """
    track = df.loc[track_id].values
    # Planes 1-24 store their position at offsets 6*i (x), 6*i+1 (y)
    # and 6*i+2 (z); plane 0 (the origin) is skipped, as before.
    x = [track[(6 * i)] for i in range(1, 25)]
    y = [track[1 + (6 * i)] for i in range(1, 25)]
    z = [track[2 + (6 * i)] for i in range(1, 25)]
    fig = plt.figure()
    # fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the supported way to get 3D axes.
    ax = fig.add_subplot(projection='3d')
    ax.plot(z, x, y)
    ax.set_title("Track {}".format(track_id))
    ax.set_xlabel("z", fontweight="bold")
    ax.set_ylabel("x", fontweight="bold")
    ax.set_zlabel("y", fontweight="bold")
    plt.show()
| gpl-2.0 |
meduz/scikit-learn | examples/preprocessing/plot_function_transformer.py | 158 | 1993 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principle component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
    """Build a toy two-class dataset.

    Two point clouds along lines of slope ~1: class 0 has a y offset
    of ~100, class 1 does not.  Returns (X, y) where X has shape
    (2000, 2) and y holds 2000 binary labels.
    """
    offset_line = np.vstack((
        _generate_vector(),
        _generate_vector() + 100,
    )).T
    base_line = np.vstack((
        _generate_vector(),
        _generate_vector(),
    )).T
    features = np.vstack((offset_line, base_line))
    labels = np.hstack((np.zeros(1000), np.ones(1000)))
    return features, labels
def all_but_first_column(X):
    """Return `X` without its first column (used after PCA to drop
    the leading principal component)."""
    trimmed = X[:, 1:]
    return trimmed
def drop_first_component(X, y):
    """Fit a PCA -> drop-first-column pipeline on a train split and
    return the transformed test split together with its labels.
    """
    column_selector = FunctionTransformer(all_but_first_column)
    pipeline = make_pipeline(PCA(), column_selector)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    pipeline.fit(X_train, y_train)
    return pipeline.transform(X_test), y_test
if __name__ == '__main__':
    # First figure: scatter the raw two-class data in its 2D space.
    X, y = generate_dataset()
    lw = 0  # marker line width
    plt.figure()
    plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
    # Second figure: the data after PCA + dropping the first component;
    # only one dimension remains, so plot it against zeros on the y-axis.
    plt.figure()
    X_transformed, y_transformed = drop_first_component(*generate_dataset())
    plt.scatter(
        X_transformed[:, 0],
        np.zeros(len(X_transformed)),
        c=y_transformed,
        lw=lw,
        s=60
    )
    plt.show()
| bsd-3-clause |
dsm054/pandas | asv_bench/benchmarks/panel_ctor.py | 3 | 1731 | import warnings
from datetime import datetime, timedelta
from pandas import DataFrame, Panel, DatetimeIndex, date_range
class DifferentIndexes(object):
    """Benchmark Panel.from_dict with 100 frames on distinct indexes."""

    def setup(self):
        frames = {}
        start = datetime(1990, 1, 1)
        end = datetime(2012, 1, 1)
        for frame_num in range(100):
            # Each frame's index is one day longer than the previous one.
            end += timedelta(days=1)
            dates = date_range(start, end)
            frames[frame_num] = DataFrame({'a': 0, 'b': 1, 'c': 2},
                                          index=dates)
        self.data_frames = frames

    def time_from_dict(self):
        # Panel is long deprecated; silence its warnings while timing.
        with warnings.catch_warnings(record=True):
            Panel.from_dict(self.data_frames)
class SameIndexes(object):
    """Benchmark Panel.from_dict with 100 references to one frame."""

    def setup(self):
        # DatetimeIndex(start=..., end=...) was deprecated in pandas
        # 0.24 and removed in 1.0; date_range builds the same daily index.
        idx = date_range(start=datetime(1990, 1, 1),
                         end=datetime(2012, 1, 1),
                         freq='D')
        df = DataFrame({'a': 0, 'b': 1, 'c': 2}, index=idx)
        # All 100 dict values are the SAME frame object on purpose.
        self.data_frames = dict(enumerate([df] * 100))

    def time_from_dict(self):
        with warnings.catch_warnings(record=True):
            Panel.from_dict(self.data_frames)
class TwoIndexes(object):
    """Benchmark Panel.from_dict with two distinct indexes (50 + 50 frames)."""

    def setup(self):
        start = datetime(1990, 1, 1)
        end = datetime(2012, 1, 1)
        # DatetimeIndex(start=..., end=...) was removed in pandas 1.0;
        # date_range produces an identical daily index.
        df1 = DataFrame({'a': 0, 'b': 1, 'c': 2},
                        index=date_range(start=start, end=end, freq='D'))
        end += timedelta(days=1)
        df2 = DataFrame({'a': 0, 'b': 1, 'c': 2},
                        index=date_range(start=start, end=end, freq='D'))
        dfs = [df1] * 50 + [df2] * 50
        self.data_frames = dict(enumerate(dfs))

    def time_from_dict(self):
        with warnings.catch_warnings(record=True):
            Panel.from_dict(self.data_frames)
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
aewhatley/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
# Shared test fixture: the iris measurements (150 samples x 4 features).
X = datasets.load_iris().data
# First feature only, kept for exercising 1-D code paths.
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    """Exercise the FastMCD algorithm implementation across regimes."""
    # Each tuple: (n_samples, n_features, n_outliers,
    #              tol_loc, tol_cov, tol_support)
    cases = [
        (100, 5, 0, 0.01, 0.1, 80),        # small, no outliers
        (100, 5, 20, 0.01, 0.01, 70),      # small, medium contamination
        (100, 5, 40, 0.1, 0.1, 50),        # small, strong contamination
        (1000, 5, 450, 0.1, 0.1, 540),     # medium data set
        (1700, 5, 800, 0.1, 0.1, 870),     # large data set
        (500, 1, 100, 0.001, 0.001, 350),  # 1-D data set
    ]
    for case in cases:
        launch_mcd_on_dataset(*case)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on synthetic contaminated data and check accuracy.

    Generates standard-normal data, shifts `n_outliers` random points
    by +-5 per coordinate, fits MCD, and asserts that the location and
    covariance errors (measured against the clean subset) and the
    support size stay within the given tolerances.
    """
    rng = np.random.RandomState(0)
    data = rng.randn(n_samples, n_features)
    # Contaminate a random subset of rows.
    outlier_rows = rng.permutation(n_samples)[:n_outliers]
    shift = 10. * \
        (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outlier_rows] += shift
    clean_mask = np.ones(n_samples).astype(bool)
    clean_mask[outlier_rows] = False
    clean_data = data[clean_mask]
    # Compute MCD by fitting an estimator object.
    mcd_fit = MinCovDet(random_state=rng).fit(data)
    location = mcd_fit.location_
    covariance = mcd_fit.covariance_
    support = mcd_fit.support_
    # Compare with the estimates learnt from the inliers only.
    error_location = np.mean((clean_data.mean(0) - location) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(clean_data) - covariance) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(support) >= tol_support)
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    """Regression test: X.shape == (3, 1) must not crash
    (the case where n_support == n_samples)."""
    rng = np.random.RandomState(0)
    tiny = rng.normal(size=(3, 1))
    MinCovDet().fit(tiny)
def test_outlier_detection():
    """EllipticEnvelope: unfitted errors, then prediction consistency."""
    rng = np.random.RandomState(0)
    data = rng.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    # Using the estimator before fitting must raise NotFittedError.
    assert_raises(NotFittedError, clf.predict, data)
    assert_raises(NotFittedError, clf.decision_function, data)
    clf.fit(data)
    y_pred = clf.predict(data)
    raw = clf.decision_function(data, raw_values=True)
    transformed = clf.decision_function(data, raw_values=False)
    # Raw decision values equal the Mahalanobis distances ...
    assert_array_almost_equal(
        raw, clf.mahalanobis(data))
    assert_array_almost_equal(clf.mahalanobis(data), clf.dist_)
    # ... the score matches the inlier ratio ...
    assert_almost_equal(clf.score(data, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    # ... and negative transformed scores line up with outlier labels.
    assert(sum(y_pred == -1) == sum(transformed < 0))
| bsd-3-clause |
alpenwasser/laborjournal | versuche/skineffect/python/vollzylinder_highfreq_approx_low.py | 1 | 8960 | #!/usr/bin/env python3
from sympy import *
from mpmath import *
from matplotlib.pyplot import *
import matplotlib.ticker as plticker
#init_printing() # make things prettier when we print stuff for debugging.
# ************************************************************************** #
# B-Field, Cylinder Coil with Massive Alu Cylinder #
# #
# High frequenzy approximation applied to low frequency measurements. This #
# should not give an accurate match between curve and measurement points. #
# ************************************************************************** #
# All values are in standard SI units unless otherwise noted.
# ---------------------------------------------------------#
# Define Variables and Constants                           #
# ---------------------------------------------------------#
mu0 = 4*pi*1e-7
rho_kuchling = 0.027e-6 # resistivity Kuchling 17th edition, p.649, tab. 45
sigma_kuchling = 1/rho_kuchling
#sigma = 37.7e6 # conductivity of aluminium (de.wikipedia.org)
#sigma = 18e6 # affects phase
sigma = 17e6 # affects phase; hand-tuned fit value, differs from Kuchling
B0 = 6.2e-2 # does not affect phase, use for scaling abs(B)
r0 = 45e-3
freq = 30 # frequency was fixed at 30 Hz
npts = 1e3
rmin=0
rmax=45e-3
# -----------------------------------------------------#
# Create a list for convenient printing of vars to     #
# file, add LaTeX where necessary.  Each entry is one  #
# pre-formatted tabular row written verbatim later on. #
# -----------------------------------------------------#
params = [
    ' ' + r'\textcolor{red}{$\sigma_{Fit}' + r'$} & \textcolor{red}{$' + '\SI{' + str(sigma) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
    ' ' + r'\textcolor{red}{$\sigma_{Kuch}' + r'$} & \textcolor{red}{$' + '\SI{' + str(sigma_kuchling) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
    ' ' + '$\mu_0' + '$ & $' + '\SI{' + str(mu0) + r'}{\newton\per\ampere\squared}' + r'$\\' + "\n",
    ' ' + '$\sigma' + '$ & $' + '\SI{' + str(sigma) + r'}{\ampere\per\volt\per\meter}' + r'$\\' + "\n",
    ' ' + '$r_{max}' + '$ & $' + '\SI{' + str(rmax) + r'}{\meter}' + r'$\\' + "\n",
    ' ' + '$r_{min}' + '$ & $' + '\SI{' + str(rmin) + r'}{\meter}' + r'$\\' + "\n",
    ' ' + '$B_0' + '$ & $' + '\SI{' + str(B0) + r'}{\tesla}' + r'$\\' + "\n",
    ' ' + '$NPTS' + '$ & $' + r'\num{' + str(npts) + '}' + r'$\\' + "\n",
    ' ' + '$f' + '$ & $' + '\SI{' + str(freq) + r'}{\hertz}' + r'$\\' + "\n",
    ]
# Matplotlib font dictionaries for axis labels and titles.
font = {
    'family' : 'serif',
    'color' : 'black',
    'weight' : 'normal',
    'size' : 9,
}
titlefont = {
    'family' : 'serif',
    'color' : 'black',
    'weight' : 'normal',
    'size' : 10,
}
# Shared plot styling (German labels are intentional for the lab report).
plot_legend_fontsize = 9
plot_color_fit = 'blue'
plot_color_measurements = 'black'
plot_label_measurements = 'Messwerte'
plot_size_measurements = 16
plot_scale_x = 'linear'
plot_label_fit = 'Fit-Funktion'
plot_label_x = 'radiale Position bezogen auf Zylinderachse (mm)'
plot_11_label_y = 'gemessene Spannung (mV)'
plot_21_label_y = 'gemessene Spannung (mV)'
plot_12_label_y = 'Phase (Grad)'
plot_21_title = r"Hochfrequenzn\"aherung: Betrag Magnetfeld Spule mit Vollzylinder (30 Hz)"
plot_22_title = r"Hochfrequenzn\"aherung: Phase Magnetfeld Spule mit Vollzylinder (30 Hz)"
# Set ticker intervals for plots (in millimeters)
loc2 = plticker.MultipleLocator(base=5)
# ---------------------------------------------------------#
# Function for magnetic Field B                            #
# ---------------------------------------------------------#
# See formula 11 on p.8 of script for experiment.
# s_skin is the skin depth; x(r) is the depth below the cylinder surface.
s_skin = sqrt(2/(2*pi*freq*mu0*sigma))
x = lambda r: r0-r
B = lambda r: B0 * exp(-x(r)/s_skin) * exp(mpc(0,-x(r)/s_skin))
B_abs = lambda r: abs(B(r))
B_arg = lambda r: arg(B(r))
# ---------------------------------------------------------#
# Generate points for radius axis                          #
# ---------------------------------------------------------#
radii = np.linspace(rmin,rmax,npts)
# ---------------------------------------------------------#
# Numerically evaluate function                            #
# ---------------------------------------------------------#
# frompyfunc lets the mpmath-based lambdas operate elementwise on arrays.
Babsufunc = np.frompyfunc(B_abs,1,1)
Bargufunc = np.frompyfunc(B_arg,1,1)
B_abs_num = Babsufunc(radii)
B_arg_num = Bargufunc(radii)
# ---------------------------------------------------------#
# Unfortunately, the arg() function only delivers values   #
# between -pi and +pi for the angle of a complex number,   #
# which, while correct, is not suitable for pretty         #
# plotting, so we unwrap the phase to obtain a             #
# continuous curve.                                        #
# ---------------------------------------------------------#
B_arg_num = np.unwrap(B_arg_num)
# ---------------------------------------------------------#
# Measurement Values from the actual experiment            #
# ---------------------------------------------------------#
radii_measured = np.array([ 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50])
voltages = np.array([2.86e-2,2.85e-2,2.87e-2,2.9e-2,3e-2,3.3e-2,3.8e-2,4.5e-2,5.4e-2,6.2e-2,3.7e-2])
phases_degrees = np.array([ 111, 109, 104, 94, 81, 65, 48.5, 32, 16, 2.7, 0])
# ---------------------------------------------------------#
# Scale values for improved legibility in plot             #
# ---------------------------------------------------------#
# We scale from meters to millimeters, from rad to degrees.
B_abs_num = 1e3 * B_abs_num
radii = 1e3 * radii
voltages = 1e3 * voltages
B_arg_num = 180/pi*B_arg_num
rmin = 1e3 * rmin
rmax = 1e3 * rmax
# ---------------------------------------------------------#
# Plot the Things                                          #
# ---------------------------------------------------------#
# Use LaTeX text rendering so the German titles/labels typeset correctly.
matplotlib.pyplot.rc('text', usetex=True)
matplotlib.pyplot.rc('font', family='serif')
fig2 = figure(2)
# Upper panel: |B| fit curve vs. measured voltages.
axes21 = fig2.add_subplot(211)
axes21.plot(radii,B_abs_num,color=plot_color_fit,label=plot_label_fit)
axes21.scatter(radii_measured,
        voltages,
        color=plot_color_measurements,
        s=plot_size_measurements,
        label=plot_label_measurements
        )
axes21.set_xlim([rmin-5,rmax*1.1])
axes21.set_xscale(plot_scale_x)
axes21.set_xlabel(plot_label_x,fontdict=font)
axes21.set_ylabel(plot_21_label_y,fontdict=font)
axes21.set_title(plot_21_title,fontdict=titlefont)
axes21.legend(fontsize=plot_legend_fontsize,loc='upper left')
axes21.xaxis.set_major_locator(loc2)
axes21.tick_params(labelsize=9)
# Lower panel: phase fit curve vs. (negated) measured phases.
axes22 = fig2.add_subplot(212)
axes22.plot(radii,B_arg_num,color=plot_color_fit,label=plot_label_fit)
axes22.scatter(radii_measured,
        -phases_degrees,
        color=plot_color_measurements,
        s=plot_size_measurements,
        label=plot_label_measurements
        )
axes22.set_xlim([rmin-5,rmax*1.1])
axes22.set_xscale(plot_scale_x)
axes22.set_xlabel(plot_label_x,fontdict=font)
axes22.set_ylabel(plot_12_label_y,fontdict=font)
axes22.set_title(plot_22_title,fontdict=titlefont)
axes22.legend(fontsize=plot_legend_fontsize,loc='upper left')
axes22.xaxis.set_major_locator(loc2)
axes22.tick_params(labelsize=9)
fig2.subplots_adjust(bottom=0.1,left=0.1,right=0.9,top=0.95,hspace=0.5)
# Save both PGF (for LaTeX inclusion) and PDF versions.
fig2.savefig('plots-pgf/massive--alu--high-freq-approx--low.pgf')
fig2.savefig('plots-pdf/massive--alu--high-freq-approx--low.pdf')
# ---------------------------------------------------------#
# Save listing to file                                     #
# ---------------------------------------------------------#
# NOTE(review): consider `with open(...)` here; the explicit close()
# below works, but would leak the handle if a write raised.
dumpfile = open('listings/massive--alu--high-freq-approx--low.tex', 'w')
# LaTeX table skeleton; the rows come from the `params` list above.
table_opening = r"""
{%
\begin{center}
\captionof{table}{%
Paramaterwerte f\"ur Fit-Funktion aus Abbildung
\ref{fig:alu:rad:approx:low}
}
\label{tab:fitparams:alu:rad:approx:low}
\sisetup{%
%math-rm=\mathtt,
scientific-notation=engineering,
table-format = +3.2e+2,
round-precision = 3,
round-mode = figures,
}
\begin{tabular}{lr}
\toprule
"""
table_closing = r"""
\bottomrule
\end{tabular}
\end{center}
}
"""
dumpfile.writelines(table_opening)
for line in params:
    dumpfile.writelines(line)
dumpfile.writelines(table_closing)
dumpfile.close()
| mit |
XianliangJ/collections | RCP-NS2/scripts/plot.py | 1 | 3051 | import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy
import sys
# Parse command-line arguments.
RCP_DATA_FILE = sys.argv[1]  # "size duration" records for RCP flows
TCP_DATA_FILE = sys.argv[2]  # "size duration" records for TCP flows
PLOT_FILE = sys.argv[3]  # base name for the output image files
# Bottleneck link speed in b/s.
C = float(sys.argv[4]) * 1000000000
# RTT in seconds.
RTT = float(sys.argv[5])
RHO = float(sys.argv[6])  # presumably the offered load fraction -- used in the PS model
PKT_SIZE = 8 * 1000  # packet size in bits (1000 bytes)
SHAPE = float(sys.argv[7])  # NOTE(review): unused below -- flow-size distribution shape?
MEAN = float(sys.argv[8])  # NOTE(review): unused below -- mean flow size?
# Flow sizes in terms of packets. Used for generating slow-start and PS lines.
FLOW_SIZES = range(0, 100000, 10)
# ---------------------------------------------------------------------------- #
def gen_slowstart():
    """Generate the TCP slow-start completion-time line.

    For each flow size L (packets) the model is
    ``(log2(L + 1) + 0.5) * RTT + L / C``.
    NOTE(review): unlike gen_ps(), the transmission term does not
    multiply by PKT_SIZE -- confirm whether that is intentional.
    """
    return [((math.log(size + 1, 2) + 0.5) * RTT) + size / C
            for size in FLOW_SIZES]
def gen_ps():
    """Generate the ideal processor-sharing completion-time line.

    Model: 1.5 RTTs of startup plus the time to serve L packets of
    PKT_SIZE bits at the residual capacity ``C * (1 - RHO)``.
    """
    return [1.5 * RTT + (size * PKT_SIZE / (C * (1 - RHO)))
            for size in FLOW_SIZES]
def parse_data_file(data_file):
    """Parse a data file of whitespace-separated "size duration" lines.

    Returns two parallel lists of strings: flow sizes and flow
    completion times, in file order.
    """
    sizes = []
    durations = []
    with open(data_file, 'r') as handle:
        for record in handle:
            fields = record.split()
            sizes.append(fields[0])
            durations.append(fields[1])
    return (sizes, durations)
def parse_rcp():
    """Parse the RCP results file named on the command line."""
    return parse_data_file(RCP_DATA_FILE)
def parse_tcp():
    """Parse the TCP results file named on the command line."""
    return parse_data_file(TCP_DATA_FILE)
def plot_semilog(ps_data, ss_data, rcp_x, rcp_y, tcp_x, tcp_y):
    """Plot completion time vs. flow size with a log y-axis.

    `ps_data` and `ss_data` are model curves evaluated at FLOW_SIZES;
    the rcp/tcp pairs are measured (size, duration) series.  The
    figure is written to 'semilog-<PLOT_FILE>'.
    """
    plt.xlabel('Flow Size [pkts] (normal scale)')
    plt.ylabel('Average Flow Completion Time [sec]')
    plt.yscale('log')
    # Fixed window so runs are visually comparable across plots.
    plt.axis([0, 2000, 0.1, 100])
    plt.plot(tcp_x, tcp_y, 'g.-', label = 'TCP (avg.)')
    plt.plot(rcp_x, rcp_y, 'b+-', label='RCP (avg.)')
    plt.plot(FLOW_SIZES, ss_data, 'r-', label='Slow-Start')
    plt.plot(FLOW_SIZES, ps_data, 'r--', label='PS')
    plt.legend(loc='upper right')
    plt.savefig("semilog-" + PLOT_FILE)
    plt.close()
def plot_loglog(ps_data, ss_data, rcp_x, rcp_y, tcp_x, tcp_y):
    """Plot completion time vs. flow size with both axes logarithmic.

    Same inputs as `plot_semilog`, but covering the large-flow regime
    (1000-100000 packets).  The figure is written to
    'loglog-<PLOT_FILE>'.
    """
    plt.xlabel('Flow Size [pkts] (log)')
    plt.xscale('log')
    plt.ylabel('Average Flow Completion Time [sec]')
    plt.yscale('log')
    plt.axis([1000, 100000, 0.1, 100])
    plt.plot(tcp_x, tcp_y, 'g.-', label = 'TCP (avg.)')
    plt.plot(rcp_x, rcp_y, 'b+-', label='RCP (avg.)')
    plt.plot(FLOW_SIZES, ss_data, 'r-', label='Slow-Start')
    plt.plot(FLOW_SIZES, ps_data, 'r--', label='PS')
    plt.legend(loc='upper right')
    plt.savefig("loglog-" + PLOT_FILE)
    plt.close()
# Parse data: model curves first, then the measured series.
ps_line = gen_ps()
ss_line = gen_slowstart()
(rcp_x, rcp_y) = parse_data_file(RCP_DATA_FILE)
(tcp_x, tcp_y) = parse_data_file(TCP_DATA_FILE)
# Generate both plots (small-flow semilog view and large-flow loglog view).
plot_semilog(ps_line, ss_line, rcp_x, rcp_y, tcp_x, tcp_y)
plot_loglog(ps_line, ss_line, rcp_x, rcp_y, tcp_x, tcp_y)
| gpl-3.0 |
Titan-C/scikit-learn | examples/linear_model/plot_iris_logistic.py | 119 | 1679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Show below is a logistic-regression classifiers decision boundaries on the
`iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

h = .02  # step size in the mesh

logreg = linear_model.LogisticRegression(C=1e5)  # large C => weak regularization

# Fit the logistic-regression classifier on the two-feature data.
# (The original comment mentioned a "Neighbours Classifier", but this is
# LogisticRegression.)
logreg.fit(X, Y)

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict a class for every mesh point (flattened to an (n, 2) array).
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())

plt.show()
| bsd-3-clause |
aarchiba/numpy | doc/example.py | 81 | 3581 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi'):
    r"""A one-line summary that does not use variable names or the
    function name.

    Several sentences providing an extended description. Refer to
    variables using back-ticks, e.g. `var`.

    Parameters
    ----------
    var1 : array_like
        Array_like means all those objects -- lists, nested lists, etc. --
        that can be converted to an array. We can also refer to
        variables like `var1`.
    var2 : int
        The type above can either refer to an actual Python type
        (e.g. ``int``), or describe the type of the variable in more
        detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
        Choices in brackets, default first when optional.

    Returns
    -------
    type
        Explanation of anonymous return value of type ``type``.
    describe : type
        Explanation of return value named `describe`.
    out : type
        Explanation of `out`.

    Other Parameters
    ----------------
    only_seldom_used_keywords : type
        Explanation
    common_parameters_listed_above : type
        Explanation

    Raises
    ------
    BadException
        Because you shouldn't have done that.

    See Also
    --------
    otherfunc : relationship (optional)
    newfunc : Relationship (optional), which could be fairly long, in which
              case the line wraps here.
    thirdfunc, fourthfunc, fifthfunc

    Notes
    -----
    Notes about the implementation algorithm (if needed).

    This can have multiple paragraphs.

    You may include some math:

    .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}

    And even use a greek symbol like :math:`omega` inline.

    References
    ----------
    Cite the relevant literature, e.g. [1]_. You may also cite these
    references in the notes section above.

    .. [1] O. McNoleg, "The integration of GIS, remote sensing,
       expert systems and adaptive co-kriging for environmental habitat
       modelling of the Highland Haggis using object-oriented, fuzzy-logic
       and neural-network techniques," Computers & Geosciences, vol. 22,
       pp. 585-588, 1996.

    Examples
    --------
    These are written in doctest format, and should illustrate how to
    use the function.

    >>> a = [1, 2, 3]
    >>> print([x + 3 for x in a])
    [4, 5, 6]
    >>> print("a\n\nb")
    a
    <BLANKLINE>
    b

    """
    # Placeholder body: this module only documents the docstring standard.
    pass
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/en/cluster/plot_mini_batch_kmeans.py | 53 | 4096 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
# #############################################################################
# Generate sample data
np.random.seed(0)  # fixed seed so the example is reproducible

batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)

# #############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0  # wall-clock fit time for full-batch KMeans

# #############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
                      n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0  # wall-clock fit time for MiniBatchKMeans

# #############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']

# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
# Re-label every sample by its nearest (sorted) center.
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
# order[k] is the MiniBatchKMeans center closest to KMeans center k.
order = pairwise_distances_argmin(k_means_cluster_centers,
                                  mbk_means_cluster_centers)

# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
    my_members = k_means_labels == k
    cluster_center = k_means_cluster_centers[k]
    ax.plot(X[my_members, 0], X[my_members, 1], 'w',
            markerfacecolor=col, marker='.')
    ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
            markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
    t_batch, k_means.inertia_))

# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
    my_members = mbk_means_labels == order[k]
    cluster_center = mbk_means_cluster_centers[order[k]]
    ax.plot(X[my_members, 0], X[my_members, 1], 'w',
            markerfacecolor=col, marker='.')
    ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
            markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
         (t_mini_batch, mbk.inertia_))

# Initialise the different array to all False
# (labels are in {0, 1, 2}, so comparing against 4 yields all-False).
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)

# A point "differs" if the two algorithms disagree on any cluster membership.
for k in range(n_clusters):
    different += ((k_means_labels == k) != (mbk_means_labels == order[k]))

identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
        markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
        markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())

plt.show()
| gpl-3.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/pylab.py | 8 | 11110 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
MATLAB |reg| [*]_ analogs and similar arguments.
.. |reg| unicode:: 0xAE
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
autoscale - turn axis autoscaling on or off, and apply it
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
violinplot - make a violin plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imsave - save array as an image file
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
locator_params - adjust parameters used in locating axis ticks
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
margins - set margins used in autoscaling
pause - pause for a specified interval
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make one subplot (numrows, numcols, axesnum)
subplots - make a figure with a set of (numrows, numcols) subplots
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
tick_params - control the appearance of ticks and tick labels
ticklabel_format - control the format of tick labels
title - add a title to the current axes
tricontour - make a contour plot on a triangular grid
tricontourf - make a filled contour plot on a triangular grid
tripcolor - make a pseudocolor plot on a triangular grid
triplot - plot a triangular grid
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
amax - the maximum along dimension m
amin - the minimum along dimension m
corrcoef - correlation coefficient
cov - covariance matrix
mean - the mean along dimension m
median - the median along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
ksdensity - the kernel density estimate
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - Deprecated--please use loadtxt.
loadtxt - load ASCII data into array.
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - Deprecated--please use savetxt.
savetxt - save an array to an ASCII file.
trapz - trapezoidal integration
__end
.. [*] MATLAB is a registered trademark of The MathWorks, Inc.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import sys, warnings
from matplotlib.cbook import flatten, is_string_like, exception_to_str, \
silent_list, iterable, dedent
import matplotlib as mpl
# make mpl.finance module available for backwards compatability, in case folks
# using pylab interface depended on not having to import it
import matplotlib.finance
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates # Do we need this at all?
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
## We are still importing too many things from mlab; more cleanup is needed.
from matplotlib.mlab import griddata, stineman_interp, slopes, \
inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from matplotlib.mlab import window_hanning, window_none, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, \
find, longest_contiguous_ones, longest_ones, \
prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, \
get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, movavg, \
exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, identity, \
base_repr, binary_repr, log2, ispower2, \
rec_append_fields, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
# don't let numpy's datetime hide stdlib
import datetime

# This is needed, or bytes will be numpy.random.bytes from
# "from numpy.random import *" above
# NOTE(review): __builtins__ is a dict when this module is imported (the
# normal case for pylab) but a module object when run as __main__ -- confirm
# the subscript access is safe for every intended use of this file.
bytes = __builtins__['bytes']
| apache-2.0 |
Ryanglambert/pybrain | examples/rl/environments/linear_fa/bicycle.py | 26 | 14462 | from __future__ import print_function
"""An attempt to implement Randlov and Alstrom (1998). They successfully
use reinforcement learning to balance a bicycle, and to control it to drive
to a specified goal location. Their work has been used since then by a few
researchers as a benchmark problem.
We only implement the balance task. This implementation differs at least
slightly, since Randlov and Alstrom did not mention anything about how they
annealed/decayed their learning rate, etc. As a result of differences, the
results do not match those obtained by Randlov and Alstrom.
"""
__author__ = 'Chris Dembia, Bruce Cam, Johnny Israeli'
from scipy import asarray
import numpy as np
from numpy import sin, cos, tan, sqrt, arcsin, arctan, sign, clip, argwhere
from matplotlib import pyplot as plt

import pybrain.rl.environments
from pybrain.rl.environments.environment import Environment
from pybrain.rl.learners.valuebased.linearfa import SARSALambda_LinFA
from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.utilities import one_to_n
class BicycleEnvironment(Environment):
    """Randlov and Alstrom's bicycle model. This code matches nearly exactly
    some c code we found online for simulating Randlov and Alstrom's
    bicycle. The bicycle travels at a fixed speed.

    State vector (``self.sensors``), 10 entries:
    theta, thetad : handlebar angle and its rate
    omega, omegad, omegadd : lean (tilt) angle and its first two rates
    xf, yf / xb, yb : front / back wheel ground-contact positions
    psi : heading angle
    """

    # For superclass.
    indim = 2    # action: (handlebar torque T, rider displacement d)
    outdim = 10  # length of the sensor vector described above

    # Environment parameters.
    time_step = 0.01  # Euler integration step [s]

    # Goal position and radius
    # Lagouakis (2002) uses angle to goal, not heading, as a state
    max_distance = 1000.

    # Acceleration on Earth's surface due to gravity (m/s^2):
    g = 9.82

    # See the paper for a description of these quantities:
    # Distances (in meters):
    c = 0.66
    dCM = 0.30
    h = 0.94
    L = 1.11
    r = 0.34
    # Masses (in kilograms):
    Mc = 15.0
    Md = 1.7
    Mp = 60.0
    # Velocity of a bicycle (in meters per second), equal to 10 km/h:
    v = 10.0 * 1000.0 / 3600.0

    # Derived constants.
    M = Mc + Mp # See Randlov's code.
    Idc = Md * r**2
    Idv = 1.5 * Md * r**2
    Idl = 0.5 * Md * r**2
    Itot = 13.0 / 3.0 * Mc * h**2 + Mp * (h + dCM)**2
    sigmad = v / r

    def __init__(self):
        Environment.__init__(self)
        self.reset()
        self.actions = [0.0, 0.0]  # (T, d), overwritten by performAction
        self._save_wheel_contact_trajectories = False

    def performAction(self, actions):
        """Store the (torque, displacement) action and advance one step."""
        self.actions = actions
        self.step()

    def saveWheelContactTrajectories(self, opt):
        """Enable/disable recording of wheel ground-contact trajectories."""
        self._save_wheel_contact_trajectories = opt

    def step(self):
        """Advance the simulation by one Euler step of `time_step` seconds."""
        # Unpack the state and actions.
        # -----------------------------
        # Want to ignore the previous value of omegadd; it could only cause a
        # bug if we assign to it.
        (theta, thetad, omega, omegad, _,
                xf, yf, xb, yb, psi) = self.sensors
        (T, d) = self.actions

        # For recordkeeping.
        # ------------------
        if self._save_wheel_contact_trajectories:
            self.xfhist.append(xf)
            self.yfhist.append(yf)
            self.xbhist.append(xb)
            self.ybhist.append(yb)

        # Intermediate time-dependent quantities.
        # ---------------------------------------
        # Avoid divide-by-zero, just as Randlov did.
        if theta == 0:
            rf = 1e8
            rb = 1e8
            rCM = 1e8
        else:
            # Turning radii of front wheel, back wheel, and center of mass.
            rf = self.L / np.abs(sin(theta))
            rb = self.L / np.abs(tan(theta))
            rCM = sqrt((self.L - self.c)**2 + self.L**2 / tan(theta)**2)

        # Effective lean angle including the rider's displacement d.
        phi = omega + np.arctan(d / self.h)

        # Equations of motion.
        # --------------------
        # Second derivative of angular acceleration:
        omegadd = 1 / self.Itot * (self.M * self.h * self.g * sin(phi)
                - cos(phi) * (self.Idc * self.sigmad * thetad
                    + sign(theta) * self.v**2 * (
                        self.Md * self.r * (1.0 / rf + 1.0 / rb)
                        + self.M * self.h / rCM)))
        thetadd = (T - self.Idv * self.sigmad * omegad) / self.Idl

        # Integrate equations of motion using Euler's method.
        # ---------------------------------------------------
        # yt+1 = yt + yd * dt.
        # Must update omega based on PREVIOUS value of omegad.
        omegad += omegadd * self.time_step
        omega += omegad * self.time_step
        thetad += thetadd * self.time_step
        theta += thetad * self.time_step

        # Handlebars can't be turned more than 80 degrees.
        theta = np.clip(theta, -1.3963, 1.3963)

        # Wheel ('tyre') contact positions.
        # ---------------------------------

        # Front wheel contact position.
        front_temp = self.v * self.time_step / (2 * rf)
        # See Randlov's code.
        if front_temp > 1:
            front_temp = sign(psi + theta) * 0.5 * np.pi
        else:
            front_temp = sign(psi + theta) * arcsin(front_temp)
        xf += self.v * self.time_step * -sin(psi + theta + front_temp)
        yf += self.v * self.time_step * cos(psi + theta + front_temp)

        # Rear wheel.
        back_temp = self.v * self.time_step / (2 * rb)
        # See Randlov's code.
        if back_temp > 1:
            back_temp = np.sign(psi) * 0.5 * np.pi
        else:
            back_temp = np.sign(psi) * np.arcsin(back_temp)
        xb += self.v * self.time_step * -sin(psi + back_temp)
        yb += self.v * self.time_step * cos(psi + back_temp)

        # Preventing numerical drift.
        # ---------------------------
        # Copying what Randlov did: rescale the rear-wheel position whenever
        # the integrated wheelbase drifts more than 1 cm from L.
        current_wheelbase = sqrt((xf - xb)**2 + (yf - yb)**2)
        if np.abs(current_wheelbase - self.L) > 0.01:
            relative_error = self.L / current_wheelbase - 1.0
            xb += (xb - xf) * relative_error
            yb += (yb - yf) * relative_error

        # Update heading, psi.
        # --------------------
        delta_y = yf - yb
        if (xf == xb) and delta_y < 0.0:
            psi = np.pi
        else:
            if delta_y > 0.0:
                psi = arctan((xb - xf) / delta_y)
            else:
                psi = sign(xb - xf) * 0.5 * np.pi - arctan(delta_y / (xb - xf))

        self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
                xf, yf, xb, yb, psi])

    def reset(self):
        """Reset to an upright bicycle at the origin and clear trajectories."""
        theta = 0
        thetad = 0
        omega = 0
        omegad = 0
        omegadd = 0
        xf = 0
        yf = self.L
        xb = 0
        yb = 0
        psi = np.arctan((xb - xf) / (yf - yb))
        self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
                xf, yf, xb, yb, psi])

        self.xfhist = []
        self.yfhist = []
        self.xbhist = []
        self.ybhist = []

    def getSteer(self):
        """Return the handlebar angle theta."""
        return self.sensors[0]

    def getTilt(self):
        """Return the lean angle omega."""
        return self.sensors[2]

    def get_xfhist(self):
        return self.xfhist

    def get_yfhist(self):
        return self.yfhist

    def get_xbhist(self):
        return self.xbhist

    def get_ybhist(self):
        return self.ybhist

    def getSensors(self):
        return self.sensors
class BalanceTask(pybrain.rl.environments.EpisodicTask):
    """The rider is to simply balance the bicycle while moving with the
    speed prescribed in the environment. This class uses a continuous 5
    dimensional state space, and a discrete state space.

    This class is heavily guided by
    pybrain.rl.environments.cartpole.balancetask.BalanceTask.
    """

    # Episode ends once the lean angle exceeds 30 degrees from vertical.
    max_tilt = np.pi / 6.
    # 3 torque choices x 3 displacement choices (see performAction).
    nactions = 9

    def __init__(self, max_time=1000.0):
        super(BalanceTask, self).__init__(BicycleEnvironment())
        self.max_time = max_time
        # Keep track of time in case we want to end episodes based on number of
        # time steps.
        self.t = 0

    @property
    def indim(self):
        # A single discrete action index in [0, nactions).
        return 1

    @property
    def outdim(self):
        # Observation: (theta, thetad, omega, omegad, omegadd).
        return 5

    def reset(self):
        super(BalanceTask, self).reset()
        self.t = 0

    def performAction(self, action):
        """Incoming action is an int between 0 and 8. The action we provide to
        the environment consists of a torque T in {-2 N, 0, 2 N}, and a
        displacement d in {-.02 m, 0, 0.02 m}.
        """
        self.t += 1
        assert round(action[0]) == action[0]

        # -1 for action in {0, 1, 2}, 0 for action in {3, 4, 5}, 1 for
        # action in {6, 7, 8}
        torque_selector = np.floor(action[0] / 3.0) - 1.0
        T = 2 * torque_selector
        # Random number in [-1, 1]:
        p = 2.0 * np.random.rand() - 1.0
        # -1 for action in {0, 3, 6}, 0 for action in {1, 4, 7}, 1 for
        # action in {2, 5, 8}
        disp_selector = action[0] % 3 - 1.0
        # The displacement includes 2 cm of uniform noise, as in Randlov.
        d = 0.02 * disp_selector + 0.02 * p
        super(BalanceTask, self).performAction([T, d])

    def getObservation(self):
        """Return the first five sensor values (angles and their rates)."""
        (theta, thetad, omega, omegad, omegadd,
                xf, yf, xb, yb, psi) = self.env.getSensors()
        return self.env.getSensors()[0:5]

    def isFinished(self):
        # Criterion for ending an episode. From Randlov's paper:
        # "When the agent can balance for 1000 seconds, the task is considered
        # learned."
        if np.abs(self.env.getTilt()) > self.max_tilt:
            return True
        elapsed_time = self.env.time_step * self.t
        if elapsed_time > self.max_time:
            return True
        return False

    def getReward(self):
        # -1 reward for falling over; no reward otherwise.
        if np.abs(self.env.getTilt()) > self.max_tilt:
            return -1.0
        return 0.0
class LinearFATileCoding3456BalanceTask(BalanceTask):
    """An attempt to exactly implement Randlov's function approximation. He
    discretized (tiled) the state space into 3456 bins. We use the same action
    space as in the superclass.

    The observation is a length-3456 one-hot vector whose single 1 marks the
    tile containing the current continuous state.
    """

    # From Randlov, 1998: bin edges for each of the five state variables.
    theta_bounds = np.array(
            [-0.5 * np.pi, -1.0, -0.2, 0, 0.2, 1.0, 0.5 * np.pi])
    thetad_bounds = np.array(
            [-np.inf, -2.0, 0, 2.0, np.inf])
    omega_bounds = np.array(
            [-BalanceTask.max_tilt, -0.15, -0.06, 0, 0.06, 0.15,
                BalanceTask.max_tilt])
    omegad_bounds = np.array(
            [-np.inf, -0.5, -0.25, 0, 0.25, 0.5, np.inf])
    omegadd_bounds = np.array(
            [-np.inf, -2.0, 0, 2.0, np.inf])
    # http://stackoverflow.com/questions/3257619/numpy-interconversion-between-multidimensional-and-linear-indexing
    nbins_across_dims = [
            len(theta_bounds) - 1,
            len(thetad_bounds) - 1,
            len(omega_bounds) - 1,
            len(omegad_bounds) - 1,
            len(omegadd_bounds) - 1]
    # This array, when dotted with the 5-dim vector of bin indices, gives a
    # 'linear' index between 0 and 3455.
    magic_array = np.cumprod([1] + nbins_across_dims)[:-1]

    @property
    def outdim(self):
        # Used when constructing LinearFALearner's.
        return 3456

    def getBin(self, theta, thetad, omega, omegad, omegadd):
        """Map a continuous 5-dim state to its linear tile index (0..3455)."""
        bin_indices = [
                np.digitize([theta], self.theta_bounds)[0] - 1,
                np.digitize([thetad], self.thetad_bounds)[0] - 1,
                np.digitize([omega], self.omega_bounds)[0] - 1,
                np.digitize([omegad], self.omegad_bounds)[0] - 1,
                np.digitize([omegadd], self.omegadd_bounds)[0] - 1,
                ]
        return np.dot(self.magic_array, bin_indices)

    def getBinIndices(self, linear_index):
        """Given a linear index (integer between 0 and outdim), returns the bin
        indices for each of the state dimensions.
        """
        # BUGFIX: use floor division so the per-dimension indices stay
        # integral. Plain '/' is true division on Python 3 (and on Python 2
        # under 'from __future__ import division'), which would produce
        # float "indices" here.
        return linear_index // self.magic_array % self.nbins_across_dims

    def getObservation(self):
        """Return the one-hot tile encoding of the current state."""
        (theta, thetad, omega, omegad, omegadd,
                xf, yf, xb, yb, psi) = self.env.getSensors()
        state = one_to_n(self.getBin(theta, thetad, omega, omegad, omegadd),
                self.outdim)
        return state
class SARSALambda_LinFA_ReplacingTraces(SARSALambda_LinFA):
    """Randlov used replacing traces, but this doesn't exist in PyBrain's
    SARSALambda.
    """
    def _updateEtraces(self, state, action, responsibility=1.):
        """Replacing-traces variant of the eligibility-trace update.

        Traces decay by gamma * lambda, the trace for the taken action in
        the visited state is capped ("replaced") at 1 instead of
        accumulating, and traces for all other actions in that state are
        zeroed.
        """
        self._etraces *= self.rewardDiscount * self._lambda * responsibility
        # This assumes that state is an identity vector (like, from one_to_n).
        self._etraces[action] = clip(self._etraces[action] + state, -np.inf, 1.)
        # Set the trace for all other actions in this state to 0:
        action_bit = one_to_n(action, self.num_actions)
        for argstate in argwhere(state == 1):
            self._etraces[argwhere(action_bit != 1), argstate] = 0.
# Build the task/learner/agent stack.
task = LinearFATileCoding3456BalanceTask()
env = task.env

# The learning is very sensitive to the learning rate decay.
learner = SARSALambda_LinFA_ReplacingTraces(task.nactions, task.outdim,
        learningRateDecay=2000)
learner._lambda = 0.95
task.discount = learner.rewardDiscount

# Exploring agent used for learning episodes.
agent = LinearFA_Agent(learner)
agent.logging = False
exp = EpisodicExperiment(task, agent)

# Greedy, non-learning agent used only to measure performance; it shares the
# same learner (and therefore the same weights).
performance_agent = LinearFA_Agent(learner)
performance_agent.logging = False
performance_agent.greedy = True
performance_agent.learning = False

env.saveWheelContactTrajectories(True)

# Interactive plotting: left panel for rewards, right panel for trajectories.
plt.ion()
plt.figure(figsize=(8, 4))
ax1 = plt.subplot(1, 2, 1)
ax2 = plt.subplot(1, 2, 2)
def update_wheel_trajectories():
    """Redraw the wheel ground-contact traces on ax2: front wheel in red,
    back wheel in blue, with equal axis aspect so paths are not distorted."""
    # The Line2D lists returned by plot() were previously bound to unused
    # locals; they are not needed.
    ax2.plot(env.get_xfhist(), env.get_yfhist(), 'r')
    ax2.plot(env.get_xbhist(), env.get_ybhist(), 'b')
    plt.axis('equal')
# Alternate learning episodes with periodic greedy evaluation episodes.
perform_cumrewards = []
for irehearsal in range(7000):
    # Learn.
    # ------
    r = exp.doEpisodes(1)
    # Discounted reward.
    cumreward = exp.task.getTotalReward()
    #print 'cumreward: %.4f; nsteps: %i; learningRate: %.4f' % (
    #        cumreward, len(r[0]), exp.agent.learner.learningRate)

    if irehearsal % 50 == 0:
        # Perform (no learning).
        # ----------------------
        # Swap out the agent.
        exp.agent = performance_agent

        # Perform.
        r = exp.doEpisodes(1)
        perform_cumreward = task.getTotalReward()
        perform_cumrewards.append(perform_cumreward)
        print('PERFORMANCE: cumreward:', perform_cumreward, 'nsteps:', len(r[0]))

        # Swap back the learning agent.
        performance_agent.reset()
        exp.agent = agent

        # Plot the evaluation-reward history and the wheel trajectories.
        ax1.cla()
        ax1.plot(perform_cumrewards, '.--')
        # Wheel trajectories.
        update_wheel_trajectories()
        plt.pause(0.001)
| bsd-3-clause |
Maplenormandy/list-62x | python/testAlgorithms.py | 1 | 13897 | import cv2
import math
import pandas as pd
import numpy as np
import time, sys, os, shutil
import yaml
from multiprocessing import Process, Queue
from Queue import Empty
import random
import imageFeatures as imf
import pickle
from sklearn import gaussian_process
"""
# This script collects data
if len(sys.argv) < 2:
print "No configuration file specified"
collectData = False
config = None
else:
collectData = True
try:
with open(sys.argv[1]) as f:
config = yaml.load(f.read())
except:
print "Error:", sys.exc_info()[0]
raise
"""
def currentTimestamp():
    """Return the current wall-clock time as a pandas Timestamp.

    ``time.time()`` gives seconds since the epoch; multiplying by 1e9
    converts to nanoseconds, the unit ``pd.Timestamp`` assumes for a
    numeric argument.
    """
    nanoseconds_since_epoch = time.time() * 1000000000
    return pd.Timestamp(nanoseconds_since_epoch)
def imageSaver(foldername, q):
while True:
toSave = None
try:
toSave = q.get(True, 1)
except Empty:
pass
if toSave != None:
if toSave == False:
print "Done"
break
name, frame = toSave
cv2.imwrite(foldername + '/' + name, frame, [cv2.IMWRITE_PNG_COMPRESSION, 9])
print "Wrote", foldername + '/' + name
"""
if collectData:
# Parse the configuration file
if 'settingsFile' in config:
rdf = pd.read_csv(config['settingsFile'])
totalFrames = len(rdf)
gains0 = rdf['Gain 0']
shutters0 = rdf['Shutter 0']
gains1 = rdf['Gain 1']
shutters1 = rdf['Shutter 1']
timestamps = pd.Series([currentTimestamp()] * totalFrames)
features = pd.Series([0] * totalFrames)
imageFiles0 = pd.Series([''] * totalFrames)
imageFiles1 = pd.Series([''] * totalFrames)
frames = rdf['Frame']
"""
frames = pd.Series([], dtype=int, name='Frame')
data = pd.DataFrame(index=frames)
params = {}
def setParam(name, x):
    """OpenCV trackbar callback: record the latest value for *name*."""
    params.update({name: x})
print 'Run name:',
shortname = raw_input()
cv2.namedWindow('frame')
while True:
print 'Parameter name (empty to terminate):',
name = raw_input()
if name != '':
params[name] = 0
print 'max:',
pmax = int(raw_input())
cv2.createTrackbar(name, 'frame', 0, pmax, lambda x: setParam(name, x))
else:
break
# Change 0 to the index that works
cap0 = cv2.VideoCapture(0)
cap1 = cv2.VideoCapture(1)
# Create the output directory and copy over stuff
for i in range(100):
foldername = 'data/' + shortname + '_' + str(i)
if not os.path.exists(foldername):
os.makedirs(foldername)
break
"""
shutil.copy(sys.argv[1], foldername)
if 'settingsFile' in config:
shutil.copy(config['settingsFile'], foldername)
"""
def setCap0Exposure(x):
cap0.set(15,x)
def setCap1Exposure(x):
cap1.set(15,x)
def setCap0Gain(x):
cap0.set(14,x)
def setCap1Gain(x):
cap1.set(14,x)
def setCap0Auto(x):
cap0.set(21,x)
def setCap1Auto(x):
cap1.set(21,x)
def findMeanLumSettings(oldSettings, oldFeatures, newFeatures):
    """Predict the next camera (shutter, gain) from mean-luminance features.

    Uses a linear exposure model with coefficients fit offline.

    Parameters
    ----------
    oldSettings : (shutter, gain) used for the previous frame.
    oldFeatures : mean luminance of the previous frame (presumably from
        ``imf.meanLuminance`` -- see the capture loop below).
    newFeatures : mean luminance of the current frame.

    Returns
    -------
    (shutter, gain) clipped to the camera's valid ranges, matching the
    behaviour of ``findLinearFeatureLumSettings`` and ``findGPSettings``.
    """
    oldShutter, oldGain = oldSettings
    oldMeanLum = oldFeatures
    newMeanLum = newFeatures
    oldExposure = imf.settingsToExposure(oldShutter, oldGain)
    # Linear regression with baked-in coefficients (fit offline).
    newExposure = 111.2148 + 0.6940*oldExposure - 2.7011*oldMeanLum + 2.6972*newMeanLum
    newShutter, newGain = imf.exposureToSettings(newExposure)
    # Clip to hardware limits (shutter 1..531, gain 16..64) for consistency
    # with the sibling prediction functions; the original returned raw values.
    return np.clip(newShutter, 1.0, 531.0), np.clip(newGain, 16.0, 64.0)
def findLinearFeatureLumSettings(oldSettings, oldFeatures, newFeatures):
    """Predict the next camera (shutter, gain) from blur-feature luminance.

    A linear exposure model (coefficients fit offline) maps the previous
    exposure and the old/new blurred-feature luminance to a new exposure,
    which is converted back to clipped (shutter, gain) hardware settings.
    """
    prev_shutter, prev_gain = oldSettings
    prev_exposure = imf.settingsToExposure(prev_shutter, prev_gain)
    predicted_exposure = (-35.4155
                          + 0.7933 * prev_exposure
                          - 2.1544 * oldFeatures
                          + 2.856 * newFeatures)
    shutter, gain = imf.exposureToSettings(predicted_exposure)
    # Clamp to the camera's supported shutter and gain ranges.
    return np.clip(shutter, 1.0, 531.0), np.clip(gain, 16.0, 64.0)
gp = pickle.load(open('gp_mean.p','r'))
#params = ['Exposure 0', 'Contrast 0', 'Contrast 1', 'Blur Luminance 0', 'Blur Luminance 1', 'Mean Foreground Illumination 0', 'Mean BackGround Illumination 0', 'Mean Foreground Illumination 1', 'Mean BackGround Illumination 1']
def findGPSettings(params):
    """Predict (shutter, gain) via the pickled Gaussian-process model ``gp``.

    ``params`` is the feature vector the GP was trained on (see the
    commented-out feature list above this function).
    """
    predicted_exposure = gp.predict(params)
    shutter, gain = imf.exposureToSettings(predicted_exposure)
    # Clamp to the camera's supported shutter and gain ranges.
    return np.clip(shutter, 1.0, 531.0), np.clip(gain, 16.0, 64.0)
def usableMatch(matches, keypoints, keypointsBaseline):
    """Count RANSAC-consistent feature matches between two frames.

    Parameters
    ----------
    matches : output of ``flann.knnMatch(descBaseline, desc, k=2)`` --
        pairs (best, second-best) of candidate matches.
    keypoints : keypoints of the new frame (train side).
    keypointsBaseline : keypoints of the baseline frame (query side).

    Returns
    -------
    list ``[num_inlier_matches, num_keypoints]``.
    """
    MIN_MATCHES = 5
    # Lowe's ratio test: keep a match only if it is clearly better than
    # the runner-up candidate.
    goodMatches = [m for m, n in matches if m.distance < .75 * n.distance]
    if len(goodMatches) > MIN_MATCHES:
        dst_pts = np.float32([keypoints[m.trainIdx].pt for m in goodMatches])
        src_pts = np.float32([keypointsBaseline[m.queryIdx].pt for m in goodMatches])
        # Only the inlier mask is needed; the homography matrix is discarded.
        _, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if mask is None:
            # findHomography can fail and return no inlier mask; the
            # original code would have crashed on mask.ravel() here.
            return [0, len(keypoints)]
        numMatches = (mask.ravel() > .5).sum()
        return [numMatches, len(keypoints)]
    return [0, len(keypoints)]
"""
if not collectData:
cv2.createTrackbar('Shutter Baseline', 'frame', 1, 531, setCap0Exposure)
cv2.createTrackbar('Gain Baseline', 'frame', 16, 64, setCap0Gain)
cv2.createTrackbar('Shutter Compared', 'frame', 1, 531, setCap1Exposure)
cv2.createTrackbar('Gain Compared', 'frame', 16, 64, setCap1Gain)
"""
# Helper variables
t = 0
i = 0
runNum = 0
startT = 0
expCam0 = True
writing = False
resetRun = False
index_params = dict(algorithm = 0, trees = 5)
search_params = dict(checks=50)
surf = cv2.SURF()
def surfDetectAndMatch(name, q, dq):
    """Worker loop: SURF-match successive frames and report inlier counts.

    Consumes tuples ``(t, payload, precomputed)`` from *q*:
    - ``precomputed == False``: payload is a grayscale image; SURF
      keypoints/descriptors are computed here.
    - ``precomputed == True``: payload is ``(keypoint_tuples, descriptors)``
      already computed by the main process (cv2.KeyPoint objects cannot be
      pickled, so they arrive as plain tuples and are rebuilt here).
    A literal ``False`` on the queue is the shutdown sentinel.
    For each consecutive frame pair, the number of successfully tracked
    (RANSAC-inlier) features is pushed to *dq* as ``(t, count)``.
    Intended as the target of a ``multiprocessing.Process``.
    """
    surf = cv2.SURF()
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    oldFrame = None
    oldKp = None
    oldDesc = None
    while True:
        newFrame = None
        try:
            newFrame = q.get(True, 1)
            print name + ": " + str(q.qsize()) + " left"
        except Empty:
            # No frame for 1 s: drop the stale baseline so the next frame
            # starts a fresh match sequence.
            if oldFrame != None:
                print name + ": Resetting"
                oldFrame = None
        if newFrame != None:
            if newFrame == False:
                # Shutdown sentinel.
                dq.close()
                kp = None
                print name + ": Done"
                break
            if newFrame[2] == False:
                kp, desc = surf.detectAndCompute(newFrame[1], None)
            else:
                # Rebuild cv2.KeyPoint objects from their picklable tuples.
                kp_temp, desc = newFrame[1]
                kp = [cv2.KeyPoint(x=p[0][0], y=p[0][1], _size=p[1], _angle=p[2], _response=p[3],
                                   _octave=p[4], _class_id=p[5]) for p in kp_temp]
            if oldFrame != None:
                # Equal frame counters are taken to mean a new run began;
                # a gap > 1 means a frame went missing.
                if newFrame[0] == oldFrame[0]:
                    print name + ": New run detected"
                elif newFrame[0]-oldFrame[0] > 1:
                    print name + ": Warning, t mismatch!"
            succTrackFeatures = 0
            if desc != None and oldDesc != None:
                matches = flann.knnMatch(oldDesc, desc, k=2)
                efficiency = usableMatch(matches, kp, oldKp)
                succTrackFeatures = efficiency[0]
            dq.put((newFrame[0], succTrackFeatures))
            oldFrame = newFrame
            oldKp = kp
            oldDesc = desc
oldParams = None
collectingGP = True
oldMeanLum = None
if cap0.isOpened() and cap1.isOpened():
q = Queue()
p = Process(target=imageSaver, args=(foldername, q,))
q0 = Queue()
dq0 = Queue()
p0 = Process(target=surfDetectAndMatch, args=("SDAM 0", q0, dq0,))
q1 = Queue()
dq1 = Queue()
p1 = Process(target=surfDetectAndMatch, args=("SDAM 1", q1, dq1,))
p.start()
p0.start()
p1.start()
# Turn off white balance
cap0.set(17, -4)
cap0.set(26, -4)
cap1.set(17, -4)
cap1.set(26, -4)
"""
if not collectData:
cv2.setTrackbarPos('Shutter Baseline', 'frame', int(cap0.get(15)))
cv2.setTrackbarPos('Gain Baseline', 'frame', int(cap0.get(14)))
cv2.setTrackbarPos('Shutter Compared', 'frame', int(cap1.get(15)))
cv2.setTrackbarPos('Gain Compared', 'frame', int(cap1.get(14)))
"""
while True:
i += 1
ret0, frame0 = cap0.read()
ret1, frame1 = cap1.read()
if ret0 and ret1:
frame0 = cv2.cvtColor(frame0, cv2.COLOR_BAYER_BG2BGR)
frame1 = cv2.cvtColor(frame1, cv2.COLOR_BAYER_BG2BGR)
disp = np.concatenate((frame0, frame1), axis=1)
try:
t0, succTrackFeatures0 = dq0.get_nowait()
data.loc[t0, 'Succesfully Tracked Features 0'] = succTrackFeatures0
except Empty:
pass
try:
t1, succTrackFeatures1 = dq1.get_nowait()
data.loc[t1, 'Succesfully Tracked Features 1'] = succTrackFeatures1
except Empty:
pass
if writing and i > 6:
gray0 = cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY)
gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
# Calculate image features
if expCam0:
kp, desc = surf.detectAndCompute(gray0, None)
kp_temp = [(p.pt, p.size, p.angle, p.response, p.octave, p.class_id) for p in kp]
q0.put((t, (kp_temp, desc), True))
q1.put((t, gray1, False))
meanLum = imf.meanLuminance(gray0)
blurLum = imf.gaussianBlurfeatureLuminance(gray0, kp)
meanFg, meanBg = imf.weightedLuminance(gray0)
contrast = imf.contrast(gray0)
camSettings = (cap0.get(15), cap0.get(14))
else:
kp, desc = surf.detectAndCompute(gray1, None)
kp_temp = [(p.pt, p.size, p.angle, p.response, p.octave, p.class_id) for p in kp]
q1.put((t, (kp_temp, desc), True))
q0.put((t, gray0, False))
meanLum = imf.meanLuminance(gray1)
blurLum = imf.gaussianBlurfeatureLuminance(gray1, kp)
meanFg, meanBg = imf.weightedLuminance(gray1)
contrast = imf.contrast(gray1)
camSettings = (cap1.get(15), cap1.get(14))
newParams = (imf.settingsToExposure(camSettings[0], camSettings[1]),
contrast, blurLum, meanFg, meanBg)
if oldGray0 != None:
# Save raw data
data.loc[t, 'Timestamp'] = currentTimestamp()
data.loc[t, 'Run Number'] = runNum
data.loc[t, 'Baseline'] = 1 if expCam0 else 0
data.loc[t, 'Experimental Mean Luminance'] = meanLum
data.loc[t, 'Shutter 0'] = cap0.get(15)
data.loc[t, 'Gain 0'] = cap0.get(14)
data.loc[t, 'Shutter 1'] = cap1.get(15)
data.loc[t, 'Gain 1'] = cap1.get(14)
imgname0 = shortname + '_0_{:0>4d}.png'.format(t)
data.loc[t, 'Image File 0'] = imgname0
imgname1 = shortname + '_1_{:0>4d}.png'.format(t)
data.loc[t, 'Image File 1'] = imgname1
q.put((imgname0, frame0))
q.put((imgname1, frame1))
if collectingGP:
data.loc[t, 'Experimental Method'] = 'GP'
params = np.array([oldParams[0], oldMeanLum, meanLum])
newShutter, newGain = findGPSettings(params)
else:
data.loc[t, 'Experimental Method'] = 'linear_blur'
newShutter, newGain = findLinearFeatureLumSettings(oldCamSettings, oldBlurLum, blurLum)
# Determine new image settings
if expCam0:
cap0.set(14, newGain)
cap0.set(15, newShutter)
else:
cap1.set(14, newGain)
cap1.set(15, newShutter)
t += 1
oldGray0 = gray0
oldGray1 = gray1
oldParams = newParams
oldBlurLum = blurLum
oldCamSettings = camSettings
oldMeanLum = meanLum
i = 0
cv2.putText(disp, "Frame: " + str(t-startT), (50,50),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
cv2.putText(disp, "Baseline: " + ("1" if expCam0 else "0"), (50,80),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
cv2.putText(disp, "GP" if collectingGP else "linear_blur", (50,110),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
cv2.imshow('frame', disp)
else:
cap0.grab()
cap1.grab()
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
# The order is to press 'w' when starting a run, then press 'r' to do it again in a pair
elif key == ord('w'):
expCam0 = random.choice((True, False))
resetRun = True
elif key == ord('e'):
resetRun = True
elif key == ord('r'):
expCam0 = not expCam0
resetRun = True
elif key == ord('s'):
writing = False
runNum += 1
elif key == ord('g'):
collectingGP = not collectingGP
if resetRun:
resetRun = False
writing = True
startT = t
oldGray0 = None
oldGray1 = None
oldParams = None
i = 0
# To start off, set auto-exposure
cap0.set(14, -2)
cap0.set(15, -2)
cap1.set(14, -2)
cap1.set(15, -2)
q.put(False)
q0.put(False)
q1.put(False)
q.close()
dq0.close()
dq1.close()
q0.close()
q1.close()
#p.join()
#p0.join()
#p1.join()
if len(data) > 0:
data.to_csv(foldername + '/' + shortname + '_rawdata.csv')
| mit |
LohithBlaze/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves int the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
    """Ground-truth regression target: the sum of two Gaussian bumps,
    one centred at 0 and a taller one (x1.5) centred at 2.

    Accepts any array shape; the input is flattened and a 1-d array of
    the same total length is returned.
    """
    flat = x.ravel()
    return np.exp(-flat ** 2) + 1.5 * np.exp(-(flat - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
    """Draw a random regression dataset ``y = f(X) + N(0, noise)``.

    X is sampled uniformly on [-5, 5) and sorted. With ``n_repeat == 1``
    a single 1-d target vector is returned; otherwise ``y`` holds one
    independent noisy realisation per column, all sharing the same X.
    Returns ``(X, y)`` with X shaped ``(n_samples, 1)``.
    """
    X = np.sort(np.random.rand(n_samples) * 10 - 5)
    if n_repeat == 1:
        y = f(X) + np.random.normal(0.0, noise, n_samples)
    else:
        # Every column is overwritten below, so empty() is safe here.
        y = np.empty((n_samples, n_repeat))
        for rep in range(n_repeat):
            y[:, rep] = f(X) + np.random.normal(0.0, noise, n_samples)
    return X.reshape((n_samples, 1)), y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
tudarmstadt-lt/sensegram | eval/significance.py | 1 | 2090 | from scipy.stats import binom
from pandas import read_csv
import numpy as np
import argparse
def mcnemar_midp(b, c):
    """Compute McNemar's test using the "mid-p" variant suggested by:

    M.W. Fagerland, S. Lydersen, P. Laake. 2013. The McNemar test for
    binary matched-pairs data: Mid-p and asymptotic are better than exact
    conditional. BMC Medical Research Methodology 13: 91.

    `b` is the number of observations correctly labeled by the first---but
    not the second---system; `c` is the number of observations correctly
    labeled by the second---but not the first---system.

    Returns
    -------
    (p, midp, chi) : tuple of floats
        The exact (two-sided) binomial p-value, the mid-p adjusted
        p-value, and the chi-squared statistic ``|b - c|**2 / (b + c)``.
        The original implementation only printed these, which made the
        result unusable programmatically; they are now also returned.
    """
    n = b + c
    x = min(b, c)
    dist = binom(n, .5)
    # Exact conditional two-sided p-value: double the lower tail.
    p = 2. * dist.cdf(x)
    # Mid-p: the doubled CDF counts the observed point twice, so subtract
    # its probability mass once.
    midp = p - dist.pmf(x)
    chi = float(abs(b - c)**2) / n
    print("b = ", b)
    print("c = ", c)
    print("Exact p = ", p)
    print("Mid p = ", midp)
    print("Chi = ", chi)
    return p, midp, chi
def run(set1, set2):
    """Load two per-context evaluation TSVs and run McNemar's mid-p test.

    Both files must carry a boolean ``correct`` column aligned row-by-row
    (same contexts in the same order -- assumed, not checked here).
    """
    # quotechar "\u0000" effectively disables quoting for these TSVs.
    r1 = read_csv(set1, sep='\t', encoding='utf8',
                  dtype={'predict_sense_ids': np.str, 'gold_sense_ids': np.str, 'context_id': np.str},
                  doublequote=False, quotechar="\\u0000" )
    r2 = read_csv(set2, sep='\t', encoding='utf8',
                  dtype={'predict_sense_ids': np.str, 'gold_sense_ids': np.str, 'context_id': np.str},
                  doublequote=False, quotechar="\\u0000" )
    s1 = r1["correct"].values.tolist()
    s2 = r2["correct"].values.tolist()
    # b: system 1 right where system 2 is wrong; c: the converse.
    b = sum([x and not y for (x,y) in zip(s1,s2)])
    c = sum([not x and y for (x,y) in zip(s1,s2)])
    mcnemar_midp(b, c)
def main():
    """Parse the two dataset paths from the command line and run the test."""
    arg_parser = argparse.ArgumentParser(description='Compute statistical significance of predicted label sets')
    arg_parser.add_argument('set1', help='A path to the first evaluated dataset. Format: "context_id<TAB>target<TAB>target_pos<TAB>target_position<TAB>gold_sense_ids<TAB>predict_sense_ids<TAB>golden_related<TAB>predict_related<TAB>context<TAB>smth<TAB>correct')
    arg_parser.add_argument("set2", help="A path to the second evaluated dataset")
    parsed = arg_parser.parse_args()
    run(parsed.set1, parsed.set2)
if __name__ == '__main__':
main() | apache-2.0 |
hainm/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 142 | 6276 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
    """Fitting reaches the expected pseudo-likelihood without mutating input."""
    data = Xdigits.copy()
    model = BernoulliRBM(n_components=64, learning_rate=0.1, batch_size=10,
                         n_iter=7, random_state=9)
    model.fit(data)
    mean_pl = model.score_samples(data).mean()
    assert_almost_equal(mean_pl, -21., decimal=0)
    # The in-place training tricks must not leak into the caller's array.
    assert_array_equal(data, Xdigits)
def test_partial_fit():
    """Repeated partial_fit over mini-batches matches full-fit quality."""
    data = Xdigits.copy()
    model = BernoulliRBM(n_components=64, learning_rate=0.1, batch_size=20,
                         random_state=9)
    n_batches = int(np.ceil(float(data.shape[0]) / model.batch_size))
    batches = np.array_split(data, n_batches)
    for _ in range(7):
        for minibatch in batches:
            model.partial_fit(minibatch)
    assert_almost_equal(model.score_samples(data).mean(), -21., decimal=0)
    # Input must be left untouched.
    assert_array_equal(data, Xdigits)
def test_transform():
    """transform() must return exactly the hidden-unit conditional means."""
    data = Xdigits[:100]
    model = BernoulliRBM(n_components=16, batch_size=5, n_iter=5,
                         random_state=42)
    model.fit(data)
    assert_array_equal(model.transform(data), model._mean_hiddens(data))
def test_small_sparse():
    """BernoulliRBM should accept a small sparse matrix without raising."""
    BernoulliRBM().fit(csr_matrix(Xdigits[:4]))
def test_small_sparse_partial_fit():
    """partial_fit must give the same result for sparse and dense input."""
    for sparse_type in (csc_matrix, csr_matrix):
        dense = Xdigits[:100].copy()
        sparse_data = sparse_type(Xdigits[:100])
        model_sparse = BernoulliRBM(n_components=64, learning_rate=0.1,
                                    batch_size=10, random_state=9)
        model_dense = BernoulliRBM(n_components=64, learning_rate=0.1,
                                   batch_size=10, random_state=9)
        model_sparse.partial_fit(sparse_data)
        model_dense.partial_fit(dense)
        # Same seed, same data => (nearly) identical pseudo-likelihood.
        assert_almost_equal(model_sparse.score_samples(dense).mean(),
                            model_dense.score_samples(dense).mean(),
                            decimal=0)
def test_sample_hiddens():
    """Sampled hidden activations should average to the analytic means."""
    rng = np.random.RandomState(0)
    data = Xdigits[:100]
    model = BernoulliRBM(n_components=2, batch_size=5, n_iter=5,
                         random_state=42)
    model.fit(data)
    expected = model._mean_hiddens(data[0])
    sampled_mean = np.mean([model._sample_hiddens(data[0], rng)
                            for _ in range(100)], 0)
    assert_almost_equal(expected, sampled_mean, decimal=1)
def test_fit_gibbs():
    # Gibbs sampling on the RBM hidden layer should be able to recreate
    # [[0], [1]] exactly from the same input.
    rng = np.random.RandomState(42)
    data = np.array([[0.], [1.]])
    model = BernoulliRBM(n_components=2, batch_size=2, n_iter=42,
                         random_state=rng)
    # that many iterations are needed for convergence
    model.fit(data)
    assert_almost_equal(model.components_,
                        np.array([[0.02649814], [0.02009084]]), decimal=4)
    assert_almost_equal(model.gibbs(data), data)
    # Reused by test_fit_gibbs_sparse for a dense/sparse comparison.
    return model
def test_fit_gibbs_sparse():
    # Same as test_fit_gibbs but with sparse input; the fitted components
    # must agree with the dense run.
    dense_model = test_fit_gibbs()
    rng = np.random.RandomState(42)
    from scipy.sparse import csc_matrix
    data = csc_matrix([[0.], [1.]])
    sparse_model = BernoulliRBM(n_components=2, batch_size=2, n_iter=42,
                                random_state=rng)
    sparse_model.fit(data)
    assert_almost_equal(sparse_model.components_,
                        np.array([[0.02649814], [0.02009084]]), decimal=4)
    assert_almost_equal(sparse_model.gibbs(data), data.toarray())
    assert_almost_equal(dense_model.components_, sparse_model.components_)
def test_gibbs_smoke():
    # Sampling the full digits dataset must stay finite, and a second
    # round of sampling must not reproduce the identical configuration.
    model = BernoulliRBM(n_components=42, batch_size=40, n_iter=20,
                         random_state=42)
    model.fit(Xdigits)
    first = model.gibbs(Xdigits)
    assert_all_finite(first)
    second = model.gibbs(Xdigits)
    assert_true(np.all((first != second).max(axis=1)))
def test_score_samples():
    # Pseudo-likelihood must be computed without clipping.
    # See Fabian's blog, http://bit.ly/1iYefRk
    rng = np.random.RandomState(42)
    data = np.vstack([np.zeros(1000), np.ones(1000)])
    model = BernoulliRBM(n_components=10, batch_size=2, n_iter=10,
                         random_state=rng)
    model.fit(data)
    assert_true((model.score_samples(data) < -300).all())
    # Sparse and dense input must score identically; this also exercises
    # sparse input validation.
    model.random_state = 42
    dense_scores = model.score_samples(data)
    model.random_state = 42
    sparse_scores = model.score_samples(lil_matrix(data))
    assert_almost_equal(dense_scores, sparse_scores)
    # Numerical stability (#2785): previously generated infinities and
    # crashed with an exception.
    with np.errstate(under='ignore'):
        model.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
    # fit() with verbose output enabled must not crash; the chatter is
    # swallowed and stdout always restored.
    model = BernoulliRBM(n_iter=2, verbose=10)
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        model.fit(Xdigits)
    finally:
        sys.stdout = saved_stdout
def test_sparse_and_verbose():
    # Verbose fitting must also work on sparse input and print a sane
    # per-iteration report.
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    from scipy.sparse import csc_matrix
    data = csc_matrix([[0.], [1.]])
    model = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
                         random_state=42, verbose=True)
    try:
        model.fit(data)
        printed = sys.stdout.getvalue()
        # make sure output is sound
        assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
                             r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
                             r" time = (\d|\.)+s",
                             printed))
    finally:
        sys.stdout = saved_stdout
| bsd-3-clause |
steelee/fishbowl-notebooks | ipython/profile_nbserver/ipython_config.py | 2 | 20465 | # Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = u''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u''
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.TerminalIPythonApp.reraise_ipython_extension_failures = False
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'Linux'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.TerminalInteractiveShell.display_page = False
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = u'/bin/vi'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.3 (default, Jul 15 2015, 16:31:30) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False