repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
dialounke/pylayers | pylayers/mobility/transit/Person.py | 2 | 20819 | """
.. currentmodule:: pylayers.mobility.transit.Person
Utility Functions
=================
.. autosummary::
:toctree: generated/
scale
copy
Person Class
=================
.. autosummary::
:toctree: generated/
Person.__init__
Person.__repr__
Person.move
Person.delete
"""
from SimPy.SimulationRT import Process,Simulation,hold
import ConfigParser
import datetime
#from math import *
#from random import normalvariate,uniform
from pylayers.mobility.transit.vec3 import vec3
from pylayers.mobility.transit.World import world
from pylayers.mobility.transit.SteeringBehavior import default_steering_mind
# from random import uniform,gauss,sample,seed
import random
import numpy as np
from pylayers.network.network import Network
from pylayers.util.utilnet import conv_vecarr
import matplotlib.pylab as plt
import pandas as pd
#from pylayers.util.pymysqldb import Database
import pylayers.util.pyutil as pyu
import pylayers.util.plotutil as plu
import pdb
def truncate(self, max_length):
    """
    Clip the vector's length to at most *max_length*.

    Parameters
    ----------
    max_length : float
        Maximum allowed vector norm. (Renamed from ``max`` to avoid
        shadowing the builtin; existing callers pass it positionally,
        e.g. ``acceleration.truncate(self.max_acceleration)``.)

    Returns
    -------
    vec3
        A rescaled copy when the vector is longer than *max_length*,
        otherwise an unchanged copy of *self*.

    References
    ----------
    "near collision avoidance" inspired from
    http://people.revoledu.com/kardi/publication/Kouchi2001.pdf
    """
    if self.length() > max_length:
        return self.normalize() * max_length
    else:
        return vec3(self)
# attach as a method of vec3 (monkey patching)
vec3.truncate = truncate
def scale(self, size):
    """
    Return a copy of this vector rescaled to norm *size*.

    Parameters
    ----------
    size : float
        Desired length of the returned vector.

    Returns
    -------
    vec3
        Vector pointing in the same direction as *self* with length *size*.
    """
    unit = self.normalize()
    return unit * size
# attach as a method of vec3 (monkey patching)
vec3.scale = scale
def copy(self):
    """Return an independent vec3 duplicate of *self*."""
    duplicate = vec3(self)
    return duplicate
# attach as a method of vec3 (monkey patching)
vec3.copy = copy
class Person(Process):
    """ Person Process

    A SimPy process modelling a pedestrian agent moving between rooms of a
    Layout, steered by social-force style behaviors.

    Attributes
    ----------
    ID : float/hex/str/...
        agent Id
    interval : float
        refresh interval of agent mobility
    roomId : int
        room ID where the agent starts when the simulation is launched
    L : pylayers.gis.layout.Layout()
        Layout instance in which the agent is moving
    net : pylayers.network.Network()
        Network instance in which network agents are communicating.
        This is used to fill the true position field of the graph.
        It must be removed in a further version (when a proper save instance
        is created)
    wld : pylayers.mobility.transit.world.world()
        world instance, equivalent to the layout but in the pytk framework.
        TODO : remove in a further version
    sim : SimPy.Simulation.Simulation.RT()
        Simulation instance shared by the whole pylayers project.
    moving : bool
        indicates whether the agent is moving (False e.g. for access points)
    froom : list
        list of forbidden rooms.
    wait : float
        wait time of the agent once it has reached its destination
    cdest : str
        method for choosing the destination: 'random' or 'file'
    save : list
        list of save option types.
        It will be removed in a further version (when a proper save instance
        is created)

    Methods
    -------
    move : make the agent move
    """
    max_acceleration = 2.0  # m/s/s
    max_speed = 1.2  # m/s
    #radius = 0.2106 # if one person takes 1.5 feet^2 of space, per traffic stds
    # 2r = 0.5 to 0.7 for "sports fans", per the Helbing, Farkas, Vicsek paper
    radius = 2.85  # per the Teknomo, et al, paper
    mass = 80  # kg
    average_radius = 0.8/(2*np.pi)  # Radius = Perimeter / 2pi
    npers = 0  # class-wide count of instantiated Persons
    #GeomNet = np.array((0,0,[[1,2,3]],[[1,0,0]],[[0,0,1]]),dtype=GeomNetType)

    def __init__(self, ID = 0, interval=0.05,roomId=-1, L=[], net=Network(),
                 wld = world(),seed=0,sim=None,moving=True,froom=[],wait=1.0,cdest='random',
                 save=[],color='k',pdshow=False):
        """ Person constructor (a SimPy Process).

        NOTE(review): the mutable/call defaults (L=[], net=Network(),
        wld=world(), froom=[], save=[]) are evaluated once at import time and
        shared across instances -- confirm this is intentional.
        """
        #GeomNetType = np.dtype([('Id',int),
        #        ('time',int),
        #        ('p',float,(1,3)),
        #        ('v',float,(1,3)),
        #        ('a',float,(1,3))])
        Person.npers +=1
        Process.__init__(self,name='Person_ID'+str(ID),sim=sim)
        self.ID=ID
        self.color=color
        self.pdshow=pdshow
        self.L = L
        self.world = wld
        self.interval = interval
        self.manager = None
        self.manager_args = []
        self.waypoints = []
        self.moving=moving
        # random.seed(seed)
        # pick a start room: random when roomId is negative, else as given
        if roomId < 0:
            try :
                self.roomId = random.sample(self.L.Gr.nodes(),1)[0]
            except:
                raise NameError('This error is due to the lack of Gr graph in the Layout argument passed to Person(Object)')
        else:
            self.roomId = roomId
        self.forbidroomId = froom
        self.cdest = cdest # destination-choice method: 'random' or 'file'
        if self.cdest == 'random':
            # draw a destination room distinct from the start and not forbidden
            # self.nextroomId = int(np.floor(random.uniform(0,self.L.Gr.size())))
            try :
                self.nextroomId = random.sample(self.L.Gr.nodes(),1)[0]
            except:
                raise NameError('This error is due to the lack of Gr graph in the Layout argument passed to Person(Object)')
            while self.nextroomId == self.roomId or (self.nextroomId in self.forbidroomId): # or (self.nextroomId in self.sim.roomlist): # destination must differ from origin
                # self.nextroomId = int(np.floor(random.uniform(0,self.L.Gr.size())))
                self.nextroomId = random.sample(self.L.Gr.nodes(),1)[0]
            #self.sim.roomlist.append(self.nextroomId) # list of all destinations of all nodes in object sim
        elif self.cdest == 'file':
            # read the per-agent room sequence and wait times from an ini file
            cfg = ConfigParser.ConfigParser()
            cfg.read(pyu.getlong('nodes_destination.ini','ini'))
            # NOTE(review): eval() on config file contents -- the ini file is
            # trusted local input here, but this is unsafe for untrusted files.
            self.room_seq=eval(dict(cfg.items(self.ID))['room_seq'])
            self.room_wait=eval(dict(cfg.items(self.ID))['room_wait'])
            print 'WARNING: when nodes_destination ini file is read:'
            print '1) the room initialization starts in the first room of the list, not in the room configured in agent.ini'
            print '2) forbiden rooms are neglected'
            self.room_counter=1
            self.nb_room=len(self.room_seq)
            self.roomId=self.room_seq[0]
            self.nextroomId=self.room_seq[self.room_counter]
            self.wait=self.room_wait[self.room_counter]
        #self.sim.roomlist.append(self.nextroomId) # list of all destinations of all nodes in object sim
        # waypoints along the Gw graph from the start room to the destination
        self.rooms, self.wp = self.L.waypointGw(self.roomId,self.nextroomId)
        # self.dlist = [i in self.L.Gw.ldo for i in self.rooms]
        for tup in self.wp[1:]:
            self.waypoints.append(vec3(tup))
        try:
            self.position = vec3(L.Gr.pos[self.roomId][0],L.Gr.pos[self.roomId][1])
        except:
            self.position = vec3()
        # self.old_pos = vec3()
        self.stuck = 0
        self.destination = self.waypoints[0]
        self.arrived_in = False
        self.velocity = vec3()
        self.acceleration = vec3()
        # local frame axes, updated from the velocity direction while moving
        self.localx = vec3(1, 0)
        self.localy = vec3(0, 1)
        self.world.add_boid(self)
        # from Helbing, et al "Self-organizing pedestrian movement"
        maxspeed = 0.8
        self.max_speed = maxspeed#random.normalvariate(maxspeed, 0.1)
        self.desired_speed = maxspeed
        self.radius = self.average_radius#random.normalvariate(self.average_radius, 0.025) / 2
        self.intersection = vec3()
        self.arrived = False
        self.endpoint = False
        self.behaviors = []
        self.steering_mind = default_steering_mind
        self.cancelled = 0
        self.net=net
        self.wait=wait
        # 2D trajectory log: one row appended per simulation step in move()
        self.df = pd.DataFrame(columns=['t','x','y','vx','vy','ax','ay'])
        self.df._metadata = self.ID
        self.save=save

        if 'mysql' in self.save:
            # open the MySQL sink configured in simulnet.ini
            config = ConfigParser.ConfigParser()
            config.read(pyu.getlong('simulnet.ini','ini'))
            sql_opt = dict(config.items('Mysql'))
            self.db = Database(sql_opt['host'],sql_opt['user'],sql_opt['passwd'],sql_opt['dbname'])
            self.date = datetime.datetime.now()

    def __repr__(self):
        """Human-readable summary of the agent's mechanical state."""
        s = 'Mechanical information\n***********************\n'
        s = s + 'agent ID: ' + str(self.ID) +'\n'
        s = s + 'color: ' + str(self.color) +'\n'
        s = s + '\nposition: ' + str(conv_vecarr(self.position)) +'\n'
        s = s + 'velocity: ' + str(conv_vecarr(self.velocity)) +'\n'
        s = s + 'acceleration: ' + str(conv_vecarr(self.acceleration)) +'\n'
        s = s + '\nOriginal room ID --->' + ' Current room ID ' +'---> Destination room ID\n'
        s = s + str(self.roomId).ljust(16) +' ---> '+\
            str(self.L.pt2ro(self.position)).ljust(16) +'---> ' + str(self.nextroomId)
        s = s + '\ncurrent waypoint ID :' + str(self.rooms[0])
        s = s + '\nremaining waypoint ID :' + str(self.rooms)
        s = s + '\npos destination room ID: ' + str(conv_vecarr(self.destination))
        s = s + '\nforbiden room list: ' + str(self.forbidroomId)
        return s

    def move(self):
        """ Move the Person

        Generator driven by SimPy: each step applies the steering behaviors,
        integrates acceleration/velocity/position over self.interval, logs the
        trajectory, and chooses a new destination when a waypoint or the final
        room is reached.
        """
        if self.pdshow:
            fig =plt.gcf()
            fig,ax=self.L.showG('w',labels=False,alphacy=0.,edges=False,fig=fig)
            plt.draw()
            plt.ion()
        while True:
            if self.moving:
                if self.sim.verbose:
                    print 'meca: updt ag ' + self.ID + ' @ ',self.sim.now()
                # if np.allclose(conv_vecarr(self.destination)[:2],self.L.Gw.pos[47]):
                #     import ipdb
                #     ipdb.set_trace()
                while self.cancelled:
                    yield passivate, self
                    print "Person.move: activated after being cancelled"
                # apply each world zone effect at most once per step
                checked = []
                for zone in self.world.zones(self):
                    if zone not in checked:
                        checked.append(zone)
                        zone(self)

                # updating acceleration (clipped to max_acceleration)
                acceleration = self.steering_mind(self)
                acceleration = acceleration.truncate(self.max_acceleration)
                self.acceleration = acceleration

                # updating velocity (clipped to max_speed)
                velocity = self.velocity + acceleration * self.interval
                self.velocity = velocity.truncate(self.max_speed)

                if velocity.length() > 0.2:
                    # record direction only when we've really had some
                    self.localy = velocity.normalize()
                    self.localx = vec3(self.localy.y, -self.localy.x)

                # updating position (forward Euler over the refresh interval)
                self.position = self.position + self.velocity * self.interval
                # self.update()
                self.position.z=0
                self.world.update_boid(self)
                self.net.update_pos(self.ID,conv_vecarr(self.position),self.sim.now())

                p=conv_vecarr(self.position).reshape(3,1)
                v=conv_vecarr(self.velocity).reshape(3,1)
                a=conv_vecarr(self.acceleration).reshape(3,1)

                # fill panda dataframe 2D trajectory
                self.df = self.df.append(pd.DataFrame({'t':pd.Timestamp(self.sim.now(),unit='s'),
                                                       'x':p[0],
                                                       'y':p[1],
                                                       'vx':v[0],
                                                       'vy':v[1],
                                                       'ax':a[0],
                                                       'ay':a[1]},
                                                      columns=['t','x','y','vx','vy','ax','ay']))

                if self.pdshow:
                    # live plot: current position plus a velocity arrow
                    ptmp =np.array([p[:2,0],p[:2,0]+v[:2,0]])

                    if hasattr(self, 'pl'):
                        # artists already exist: just move them
                        self.pl[0].set_data(self.df['x'].tail(1),self.df['y'].tail(1))
                        self.pla[0].set_data(ptmp[:,0],ptmp[:,1])
                        circle= plt.Circle((self.df['x'].tail(1),self.df['y'].tail(1)),radius = self.radius,alpha=0.3)
                        ax.add_patch(circle)
                    else :
                        # first step: create the artists
                        self.pl = ax.plot(self.df['x'].tail(1),self.df['y'].tail(1),'o',color=self.color,ms=self.radius*10)
                        self.pla = ax.plot(ptmp[:,0],ptmp[:,1],'r')
                        circle= plt.Circle((self.df['x'].tail(1),self.df['y'].tail(1)),radius = self.radius,alpha=0.3)
                        ax.add_patch(circle)
                    # try:
                    #     fig,ax=plu.displot(p[:2],p[:2]+v[:2],'r')
                    # except:
                    #     pass
                    # import ipdb
                    # ipdb.set_trace()
                    plt.draw()
                    plt.pause(0.0001)
                if 'mysql' in self.save:
                    self.db.writemeca(self.ID,self.sim.now(),p,v,a)
                if 'txt' in self.save:
                    pyu.writemeca(self.ID,self.sim.now(),p,v,a)

                # new target when arrived in poi
                if self.arrived and\
                    (self.L.pt2ro(self.position) ==\
                        self.L.Gw.node[self.rooms[1]]['room']):

                    self.arrived = False
                    if self.endpoint:
                        # final room reached: pick a brand new destination
                        self.endpoint=False
                        self.roomId = self.nextroomId
                        # remove the remaining waypoint which correspond
                        # to current room position
                        del self.waypoints[0]
                        del self.rooms[0]
                        # del self.dlist[0]

                        #
                        # If door lets continue
                        #
                        #
                        # ig destination --> next room
                        #
                        #adjroom  = self.L.Gr.neighbors(self.roomId)
                        #Nadjroom = len(adjroom)
                        if self.cdest == 'random':
                            # self.nextroomId = int(np.floor(random.uniform(0,self.L.Gr.size())))
                            self.nextroomId = random.sample(self.L.Gr.nodes(),1)[0]
                            # test 1 ) next != actualroom
                            #      2 ) nextroom != forbidden room
                            #      3 ) room not shared with another agent
                            while self.nextroomId == self.roomId or (self.nextroomId in self.forbidroomId):# or (self.nextroomId in self.sim.roomlist):
                                # self.nextroomId = int(np.floor(random.uniform(0,self.L.Gr.size())))
                                self.nextroomId = random.sample(self.L.Gr.nodes(),1)[0]
                        elif self.cdest == 'file':
                            # advance through the configured room sequence,
                            # wrapping around at the end
                            self.room_counter=self.room_counter+1
                            if self.room_counter >= self.nb_room:
                                self.room_counter=0
                            self.nextroomId=self.room_seq[self.room_counter]
                            self.wait=self.room_wait[self.room_counter]
                        #self.sim.roomlist.append(self.nextroomId) # list of all destinations of all nodes in object sim
                        self.rooms, wp = self.L.waypointGw(self.roomId,self.nextroomId)
                        # self.dlist = [i in self.L.Gw.ldo for i in self.rooms]
                        for tup in wp[1:]:
                            self.waypoints.append(vec3(tup))
                        #nextroom = adjroom[k]
                        #  print "room : ",self.roomId
                        #  print "nextroom : ",self.nextroomId
                        #p_nextroom = self.L.Gr.pos[self.nextroomId]
                        #setdoors1  = self.L.Gr.node[self.roomId]['doors']
                        #setdoors2  = self.L.Gr.node[nextroom]['doors']
                        #doorId     = np.intersect1d(setdoors1,setdoors2)[0]
                        #
                        # coord door
                        #
                        #unode = self.L.Gs.neighbors(doorId)
                        #p1    = self.L.Gs.pos[unode[0]]
                        #p2    = self.L.Gs.pos[unode[1]]
                        #print p1
                        #print p2
                        #pdoor = (np.array(p1)+np.array(p2))/2
                        self.destination = self.waypoints[0]

                        if self.sim.verbose:
                            print 'meca: ag ' + self.ID + ' wait ' + str(self.wait)#*self.interval)
                        # pause in the reached room before heading out again
                        yield hold, self, self.wait
                    else:
                        # intermediate waypoint reached: move on to the next one
                        del self.waypoints[0]
                        del self.rooms[0]
                        # del self.dlist[0]
                        #print "wp : ", self.waypoints
                        if len(self.waypoints)==1:
                            self.endpoint=True
                        self.destination = self.waypoints[0]
                        #print "dest : ", self.destination
                else:
                    yield hold, self, self.interval
            else:
                # static agent (e.g. access point): only refresh bookkeeping
                # self.update()
                self.world.update_boid(self)
                self.net.update_pos(self.ID,conv_vecarr(self.position),self.sim.now())
                yield hold, self, self.interval

    def delete(self):
        """
        Remove this boid from world.tk and flag the process as cancelled.
        """
        tk = self.world.tk
        tk.canvas.delete(self.graphic)
        self.world.remove_boid(self)
        self.cancelled = 1

    # def update(self):
    #     tk = self.world.tk
    #     if tk is None: return
    #     if not hasattr(self, 'graphic'):
    #         self.graphic = tk.canvas.create_polygon(0, 0, 0, 1, 1, 1, 1, 0, smooth=True, fill='red', outline='black', tag='person')
    #         if tk.collision_vectors:
    #             self.graphic_front_collision = tk.canvas.create_line(0, 0, 1, 0, fill='white')
    #             self.graphic_left_collision = tk.canvas.create_line(0, 0, 1, 0, fill='white')
    #             self.graphic_right_collision = tk.canvas.create_line(0, 0, 1, 0, fill='white')
    #             self.graphic_intersection = tk.canvas.create_oval(0, 0, 1, 1, fill='red')
    #             self.graphic_intersection_normal = tk.canvas.create_line(0, 0, 1, 1, fill='red')
    #         if tk.vectors:
    #             self.graphic_velocity = tk.canvas.create_line(0, 0, 1, 0, fill='green')
    #             self.graphic_acceleration = tk.canvas.create_line(0, 0, 1, 0, fill='blue')
    #     x_, y_ = tk.x_, tk.y_
    #     position = self.position
    #     width, depth = self.localx * (self.radius), self.localy * (self.radius * 0.65)
    #     ul = position - depth + width
    #     ur = position + depth + width
    #     lr = position + depth - width
    #     ll = position - depth - width
    #     tk.canvas.coords(self.graphic,
    #                      x_(ul.x), y_(ul.y), x_(ur.x), y_(ur.y),
    #                      x_(lr.x), y_(lr.y), x_(ll.x), y_(ll.y))
    #     tk.canvas.create_line(x_(ur.x),y_(ur.y),x_(ul.x),y_(ul.y))
    #     if tk.vectors:
    #         xx, yy, unused_z = position
    #         velocity = self.velocity
    #         tk.canvas.coords(self.graphic_velocity,
    #                          x_(xx), y_(yy), x_(xx + velocity.x), y_(yy + velocity.y))
    #         acceleration = self.acceleration
    #         tk.canvas.coords(self.graphic_acceleration,
    #                          x_(xx), y_(yy), x_(xx + acceleration.x), y_(yy + acceleration.y))
    #     if tk.collision_vectors:
    #         xx, yy, unused_z = position
    #         speed = self.velocity.length()
    #         front_check = 0.5 + speed * 1.5
    #         side_check = 0.5 + speed * 0.5
    #         point = self.localy.scale(front_check)
    #         tk.canvas.coords(self.graphic_front_collision,
    #                          x_(xx), y_(yy), x_(xx + point.x), y_(yy + point.y))
    #         point = (self.localy + self.localx).scale(side_check)
    #         tk.canvas.coords(self.graphic_left_collision,
    #                          x_(xx), y_(yy), x_(xx + point.x), y_(yy + point.y))
    #         point = (self.localy - self.localx).scale(side_check)
    #         tk.canvas.coords(self.graphic_right_collision,
    #                          x_(xx), y_(yy), x_(xx + point.x), y_(yy + point.y))
    #         if self.intersection:
    #             xx, yy, unused_z = self.intersection
    #             tk.canvas.coords(self.graphic_intersection,
    #                              x_(xx-0.2), y_(yy-0.2), x_(xx + 0.2), y_(yy + 0.2))
    #             point = self.intersection_normal
    #             tk.canvas.coords(self.graphic_intersection_normal,
    #                              x_(xx), y_(yy), x_(xx + point.x), y_(yy + point.y))
    #         else:
    #             tk.canvas.coords(self.graphic_intersection,
    #                              0, 0, 0, 0)
    #             tk.canvas.coords(self.graphic_intersection_normal,
    #                              0, 0, 0, 0)
    #     tk.canvas.lower(self.graphic, 'vehicle')
| mit |
mxjl620/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture

n_samples = 300

# generate random sample, two components
np.random.seed(0)

# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])

# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)

# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])

# fit a Gaussian Mixture Model with two components
# NOTE(review): mixture.GMM is the pre-0.18 scikit-learn API; newer releases
# expose the same estimator as mixture.GaussianMixture.
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)

# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
# score_samples returns (log-likelihoods, responsibilities); keep the former,
# negated so the contours show negative log-likelihood
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)

CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
                 levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)

plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/sparse/list.py | 16 | 3695 | import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import pprint_thing
from pandas.sparse.array import SparseArray
import pandas._sparse as splib
class SparseList(PandasObject):
    """
    Data structure for accumulating data to be converted into a
    SparseArray. Has similar API to the standard Python list

    Parameters
    ----------
    data : scalar or array-like
    fill_value : scalar, default NaN
    """

    def __init__(self, data=None, fill_value=np.nan):
        # fill_value marks entries not explicitly stored in the sparse chunks
        self.fill_value = fill_value
        self._chunks = []

        if data is not None:
            self.append(data)

    def __unicode__(self):
        """Object header followed by one repr line per stored chunk."""
        contents = '\n'.join(repr(c) for c in self._chunks)
        return '%s\n%s' % (object.__repr__(self), pprint_thing(contents))

    def __len__(self):
        # total logical length across all chunks
        return sum(len(c) for c in self._chunks)

    def __getitem__(self, i):
        """Positional lookup across chunks; supports negative indices."""
        if i < 0:
            if i + len(self) < 0:  # pragma: no cover
                raise ValueError('%d out of range' % i)
            i += len(self)

        # walk the chunks until the one containing position i is found
        passed = 0
        j = 0
        while i >= passed + len(self._chunks[j]):
            passed += len(self._chunks[j])
            j += 1
        return self._chunks[j][i - passed]

    def __setitem__(self, i, value):
        # SparseList is append-only; in-place assignment is unsupported
        raise NotImplementedError

    @property
    def nchunks(self):
        # number of internal SparseArray chunks accumulated so far
        return len(self._chunks)

    @property
    def is_consolidated(self):
        # True when all appended data has been merged into a single chunk
        return self.nchunks == 1

    def consolidate(self, inplace=True):
        """
        Internally consolidate chunks of data

        Parameters
        ----------
        inplace : boolean, default True
            Modify the calling object instead of constructing a new one

        Returns
        -------
        splist : SparseList
            If inplace=False, new object, otherwise reference to existing
            object
        """
        if not inplace:
            result = self.copy()
        else:
            result = self

        if result.is_consolidated:
            return result

        result._consolidate_inplace()
        return result

    def _consolidate_inplace(self):
        # merge all chunk values and sparse indexes into one SparseArray
        new_values = np.concatenate([c.sp_values for c in self._chunks])
        new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])
        new_arr = SparseArray(new_values, sparse_index=new_index,
                              fill_value=self.fill_value)
        self._chunks = [new_arr]

    def copy(self):
        """
        Return copy of the list

        Returns
        -------
        new_list : SparseList
        """
        # shallow copy: the chunk list is new but the SparseArray chunks are
        # shared (they are never mutated in place)
        new_splist = SparseList(fill_value=self.fill_value)
        new_splist._chunks = list(self._chunks)
        return new_splist

    def to_array(self):
        """
        Return SparseArray from data stored in the SparseList

        Returns
        -------
        sparr : SparseArray
        """
        self.consolidate(inplace=True)
        return self._chunks[0]

    def append(self, value):
        """
        Append element or array-like chunk of data to the SparseList

        Parameters
        ----------
        value: scalar or array-like
        """
        if np.isscalar(value):
            value = [value]

        sparr = SparseArray(value, fill_value=self.fill_value)
        self._chunks.append(sparr)
        # NOTE(review): _consolidated is written here but never read elsewhere
        # in this class; is_consolidated is derived from nchunks instead.
        self._consolidated = False
def _concat_sparse_indexes(indexes):
    """Concatenate several sparse indexes into a single IntIndex.

    Each index's integer positions are shifted by the cumulative length of
    the indexes that precede it, then all positions are joined.
    """
    offset = 0
    shifted = []
    for index in indexes:
        # shift this index's positions past everything already accumulated
        shifted.append(index.to_int_index().indices + offset)
        offset += index.length
    return splib.IntIndex(offset, np.concatenate(shifted))
| artistic-2.0 |
jakobworldpeace/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)

import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs

###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)

###############################################################################
# Compute clustering with MeanShift

# The following bandwidth can be automatically detected using
# estimate_bandwidth (quantile sets the pairwise-distance percentile used)
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)

ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_

labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)

print("number of estimated clusters : %d" % n_clusters_)

###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle

plt.figure(1)
plt.clf()

# one color per estimated cluster, plus a large marker at each center
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    my_members = labels == k
    cluster_center = cluster_centers[k]
    plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
monitor1379/sammale | examples/e1_LinearPrimaryFunctionMode_for_regressionl.py | 1 | 1822 | # encoding: utf-8
"""
@author: monitor1379
@contact: yy4f5da2@hotmail.com
@site: www.monitor1379.com
@version: 1.0
@license: GNU General Public License(Version 3)
@file: e1_LinearPrimaryFunctionModel.py
@time: 2016/12/18 23:56
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from sammale import datasets
from sammale import objectives
from sammale.models import LinearPrimaryFunctionModel
def run():
    """Entry point for the demo; only test1 is enabled by default."""
    test1()
    # test2()
def test1():
    """Fit a 3-basis model (constant, linear, sinc) to noisy curve data.

    Prints the MSE before and after a least-squares fit, then plots the
    real vs predicted points.
    """
    x, y = datasets.make_simple_curve1(100, 0.1)

    model = LinearPrimaryFunctionModel()
    model.add(lambda t: 1)
    model.add(lambda t: t)
    model.add(lambda t: np.sin(t * np.pi) / (t * np.pi))

    # error of the untrained model
    y_pred = model.predict(x)
    print('mse:{}'.format(objectives.MSE(y, y_pred)))

    # least-squares fit
    model.fit_LS(x, y)
    # model.fit(x, y, method='wls')
    y_pred = model.predict(x)
    print('mse:{}'.format(objectives.MSE(y, y_pred)))
    print(model.theta)

    plt.scatter(x, y, c='b', label='y_real')
    plt.scatter(x, y_pred, c='r', label='y_pred')
    plt.legend()
    plt.show()
def test2():
    """Fit a degree-8 polynomial basis model to noisy curve data and plot.

    Same flow as test1 but with monomial basis functions 1, t, ..., t**8.
    """
    model = LinearPrimaryFunctionModel()
    model.add(lambda t: 1)
    model.add(lambda t: t)
    model.add(lambda t: t ** 2)
    model.add(lambda t: t ** 3)
    model.add(lambda t: t ** 4)
    model.add(lambda t: t ** 5)
    model.add(lambda t: t ** 6)
    model.add(lambda t: t ** 7)
    model.add(lambda t: t ** 8)

    x, y = datasets.make_simple_curve1(100, 0.2)

    # error of the untrained model
    y_pred = model.predict(x)
    print('mse:{}'.format(objectives.MSE(y, y_pred)))

    # least-squares fit
    model.fit_LS(x, y)
    y_pred = model.predict(x)
    print('mse:{}'.format(objectives.MSE(y, y_pred)))

    plt.scatter(x, y, c='b', label='y_real')
    plt.scatter(x, y_pred, c='r', label='y_pred')
    plt.legend()
    plt.show()
# run the demo only when executed as a script
if __name__ == '__main__':
    run()
| gpl-3.0 |
BBN-Q/PyQLab | analysis/SSRO.py | 4 | 6234 | import numpy as np
from scipy.signal import butter, lfilter
from scipy.io import loadmat
from sklearn import decomposition, preprocessing, cross_validation
from sklearn import svm, grid_search
import pywt
import h5py
import matplotlib.pyplot as plt
def create_fake_data(SNR, dt, maxTime, numShots, T1=1.0):
    """
    Create fake single-shot qubit measurement records for training/testing.

    Parameters
    ----------
    SNR : float
        Signal-to-noise ratio; additive Gaussian noise has std 1/sqrt(dt*SNR).
    dt : float
        Sampling interval of the records.
    maxTime : float
        Record duration; sample times run over arange(dt, maxTime, dt).
    numShots : int
        Total number of records. Even rows are ground-state shots (constant
        -1), odd rows are excited-state shots (+1 until a random T1 decay).
    T1 : float, optional
        Relaxation time constant used to draw the decay times.

    Returns
    -------
    numpy.ndarray
        Array of shape (numShots, len(arange(dt, maxTime, dt))).
    """
    # Create a random set of decay times, one per excited-state shot.
    # numShots // 2 (was numShots / 2): keeps the size argument an int under
    # Python 3 true-division semantics.
    decayTimes = np.random.exponential(scale=T1, size=numShots // 2)
    # Create the noiseless decays
    timePts = np.arange(dt, maxTime, dt)
    fakeData = np.zeros((numShots, timePts.size))
    # Put the ground state runs down to -1
    fakeData[::2, :] = -1.0
    # Put the excited state ones at +1 until they decay.
    # 2*ct + 1 addresses the odd rows directly; the original 2*ct - 1 covered
    # the same set of rows via wraparound (row -1), just pairing them with
    # different random draws.
    for ct, decayTime in enumerate(decayTimes):
        fakeData[2*ct + 1] = 2*(decayTime > timePts) - 1
    # Now add Gaussian noise
    fakeData += np.random.normal(scale=1.0/np.sqrt(dt*SNR), size=fakeData.shape)
    return fakeData
def extract_meas_data(fileName, numAvgs):
    """
    Load, filter and extract pulsed measurement data from single-shot records.

    Parameters
    ----------
    fileName : str
        Matlab .mat file containing a 'demodSignal' array.
    numAvgs : int
        Number of hardware averages per record (must divide 8000).

    Returns
    -------
    numpy.ndarray
        Real and imaginary pulse features stacked column-wise, one row per
        shot.
    """
    # Load the matlab data and average over the repeated-shot axis.
    # 8000 // numAvgs (was 8000/numAvgs): keeps the reshape argument an int
    # under Python 3 true-division semantics.
    rawData = np.mean(loadmat(fileName)['demodSignal'].reshape((3200, 8000 // numAvgs, numAvgs), order='F'), axis=2)
    # Decimate by a factor of 8 twice (5th-order Butterworth anti-alias pass)
    b, a = butter(5, 0.5/8)
    # BUG FIX: the first pass previously filtered an undefined name `avgData`
    # (NameError at runtime); it must operate on the freshly loaded rawData.
    filteredData = lfilter(b, a, rawData, axis=0)[::8, :]
    filteredData = lfilter(b, a, filteredData, axis=0)[::8, :]
    # Extract the pulse part
    pulseData = filteredData[10:40, :]
    # Pull out real and imaginary components
    return np.hstack((pulseData.real.T, pulseData.imag.T))
def fidelity_est(testSignals):
    """
    Estimate the optimal readout fidelity from interleaved shot signals.

    Even-indexed entries are ground-state shots, odd-indexed entries are
    excited-state shots. The fidelity is half the L1 distance between the
    two empirical probability densities (histograms over a common range).
    """
    lo = np.min(testSignals)
    hi = np.max(testSignals)
    groundProb = np.histogram(testSignals[::2], bins=100, range=(lo, hi), density=True)[0]
    excitedProb, binEdges = np.histogram(testSignals[1::2], bins=100, range=(lo, hi), density=True)
    binWidth = binEdges[1] - binEdges[0]
    return 0.5 * binWidth * np.sum(np.abs(groundProb - excitedProb))
def test_fixed(SNRs):
    """
    Fidelity sweep for an ideal (effectively infinite-T1) qubit.

    For each SNR, fake records are generated with T1=1e9 (no decay over the
    record), boxcar-integrated, and the histogram fidelity is estimated.
    """
    numShots = 10000
    dt = 1e-3
    fidelities = []
    for snr in SNRs:
        records = create_fake_data(snr, dt, 1, numShots, T1=1e9)
        integrated = dt * np.sum(records, axis=1)
        fidelities.append(fidelity_est(integrated))
    return fidelities
def test_boxcar(SNRs, intTimes):
    """
    Boxcar-integration fidelity sweep for a finite-T1 qubit.

    Parameters
    ----------
    SNRs : iterable of float
        Signal-to-noise ratios to test.
    intTimes : iterable of float
        Integration time paired with each SNR.

    Returns
    -------
    list
        One histogram fidelity estimate per (SNR, intTime) pair.
    """
    fidelities = []
    numShots = 10000
    dt = 1e-3
    # (removed an unused `trueStates` array that was computed but never read)
    for SNR, intTime in zip(SNRs, intTimes):
        fakeData = create_fake_data(SNR, dt, intTime, numShots)
        signal = dt*np.sum(fakeData, axis=1)
        fidelities.append(fidelity_est(signal))
    return fidelities
def test_nn(SNR):
    """Placeholder for a neural-network based estimator test (unimplemented)."""
    pass
def load_exp_data(fileName):
    """
    Load ground/excited single-shot records dumped from matlab (HDF5).

    Returns
    -------
    (gData, eData)
        Complex ndarrays for ground and excited state preparations.
    """
    f = h5py.File(fileName, 'r')
    # the datasets are stored as raw bytes; reinterpret as complex and restore
    # the original array shape
    gData = np.fromstring(f['groundData'].value, dtype=np.complex).reshape(f['groundData'].shape)
    eData = np.fromstring(f['excitedData'].value, dtype=np.complex).reshape(f['excitedData'].shape)
    # NOTE(review): the h5py file handle is never closed -- consider `with`.
    return gData, eData
def wavelet_transform(measRecords, wavelet):
    """
    Wavelet-transform measurement records, keeping the coarsest components.

    Each record is decomposed to level 3; only the level-3 approximation and
    detail coefficients (cA3, cD3) are kept and concatenated per record.
    """
    # wavedec returns [cA3, cD3, cD2, cD1]; keep the first two
    coarse = [pywt.wavedec(record, wavelet, level=3)[:2] for record in measRecords]
    return np.array([np.hstack(pair) for pair in coarse])
def credible_interval(outcomes, c=0.95):
    """
    Bayesian credible interval for a fidelity (Bernoulli success) estimate.

    Parameters
    ----------
    outcomes : ndarray
        Array of trial outcomes; nonzero entries count as successes.
    c : float, default 0.95
        Credibility level of the interval.

    Returns
    -------
    (xlo, xup)
        Lower and upper bounds from the inverse regularized incomplete beta
        function with a uniform (Beta(1,1)) prior.
    """
    from scipy.special import betaincinv
    total = outcomes.size
    successes = np.count_nonzero(outcomes)
    lower = betaincinv(successes+1, total-successes+1, (1-c)/2.)
    upper = betaincinv(successes+1, total-successes+1, (1+c)/2.)
    return lower, upper
# NOTE(review): this loop was left at module level and references names that
# are never defined anywhere in this file (testSignals, weights, gUnWound,
# eUnWound), so importing the module raised NameError. Preserved here,
# disabled, for reference.
# for ct in range(90, 120):
#     testSignals[::2] = np.sum((weights*gUnWound.real)[:, :ct], axis=1)
#     testSignals[1::2] = np.sum((weights*eUnWound.real)[:, :ct], axis=1)
#     print(fidelity_est(testSignals))
if __name__ == '__main__':
    pass
    # ------------------------------------------------------------------
    # Scratch/experiment code kept below for reference.
    # ------------------------------------------------------------------
    # SNR = 1e4
    # fakeData = create_fake_data(SNR, 1e-3, 1, 4000)
    # # trainData = pca.transform(fakeData)
    # # validateData = pca.transform(create_fake_dpata(SNR, 1e-2, 1, 2000))
    # trueStates = np.tile([0,1], 2000).flatten()
    # # trueStates = np.hstack((np.zeros(1000), np.ones(1000)))
    # # testData = wavelet_transform(fakeData, 'db4')
    # testData = pca.transform(fakeData)
    # # scaler = preprocessing.Scaler().fit(testData)
    # # testData = scaler.transform(testData)
    # # fakeData = create_fake_data(SNR, 1e-2, 1, 5000)
    # validateData = scaler.transform(validateData)

    # NOTE(review): this load expects a developer-local data dump; the
    # indentation here assumes it belongs under the __main__ guard -- confirm
    # against version history.
    gData, eData = load_exp_data('/home/cryan/Desktop/SSData.mat')

    # #Use PCA to extract fewer, more useful features
    # allData = np.vstack((np.hstack((gData.real, gData.imag)), np.hstack((eData.real, eData.imag))))
    # pca = decomposition.PCA()
    # pca.n_components = 20
    # reducedData = pca.fit_transform(allData)
    # #Assing the assumed states
    # states = np.repeat([0,1], 10000)
    # X_train, X_test, y_train, y_test = cross_validation.train_test_split(reducedData, states, test_size=0.2, random_state=0)
    # # searchParams = {'gamma':(1.0/100)*np.logspace(-3, 0, 10), 'nu':np.arange(0.01, 0.2, 0.02)}
    # # clf = grid_search.GridSearchCV(svm.NuSVC(cache_size=2000), searchParams, n_jobs=2)
    # searchParams = {'C':np.linspace(0.1,4,20)}
    # clf = grid_search.GridSearchCV(svm.SVC(cache_size=2000), searchParams)
    # # clf = svm.SVC()
    # clf.fit(X_train, y_train)
    # print clf.score(X_test, y_test)
    # gridScores = np.reshape([x[1] for x in clf.grid_scores_], (clf.param_grid['nu'].size, clf.param_grid['gamma'].size))
    # x_min, x_max = reducedData[:, 0].min() - 0.1, reducedData[:, 0].max() + 0.1
    # y_min, y_max = reducedData[:, 1].min() - 0.1, reducedData[:, 1].max() + 0.1
    # xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.005), np.arange(y_min, y_max, 0.005))
    # Z = clf.predict(np.c_[xx.ravel(), yy.ravel(), np.zeros(xx.size), np.zeros(xx.size)])
    # Z = Z.reshape(xx.shape)
    # plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    # plt.scatter(reducedData[:10000,0], reducedData[:10000,1], s=1, c='b', edgecolors='none')
    # plt.scatter(reducedData[10000:,0], reducedData[10000:,1], s=1, c='r', edgecolors='none')
    # plt.xlim((x_min, x_max))
    # plt.ylim((y_min, y_max))
    # plt.xlabel('Principal Component 1', fontsize=16)
    # plt.ylabel('Principal Component 2', fontsize=16)
    # plt.show()
| apache-2.0 |
Jessime/Excision | src/play_levels.py | 1 | 7834 | import os
import sys
import json
import markdown
import subprocess as sp
import numpy as np
import pandas as pd
from flask import Markup
from pickle import load
from importlib import import_module, reload
from shutil import copyfile
from traceback import format_exc
from level_markdown import parse, change_log
class State():
    """
    Class-level game state for the level/task checker.

    NOTE(review): every method here is invoked with the *class itself* as the
    first argument (e.g. ``State.problem(State)`` or ``self.method(self, ...)``
    from inside a classmethod) rather than with instances; the ``self``
    parameter therefore refers to the class throughout.

    Parameters
    ----------
    lvl_data : dict
        An example schema for lvl_data is:
        {level_number : {'problem':{'cmd': 'command',
                                    'answer': 'answer'
                                    },
                         'task1':{'func': 'function_name',
                                  'tests': [[arg1, arg2, arg3],
                                            [arg1, arg2, arg3],
                                            [arg1, arg2, arg3]
                                            ],
                                  'answers': [answer1, answer2, answer3]
                                  }
                         'task2':{'func': 'function_name',
                                  'tests': [[arg1, arg2],
                                            [arg1, arg2]
                                            ]
                                  'answers': [answer1, answer2]
                                  }
                         'task3':{'func': 'function_name',
                                  'tests': [[arg1, arg2, arg3]
                                            ]
                                  'answers': [answer1]
                                  }
                         }
        }
    """
    # Package root: two directory levels above this file.
    PACKAGE = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    config_path = os.path.join(PACKAGE, 'config.json')
    # Load the saved game; create a fresh save at level 1 when none exists.
    try:
        config = json.load(open(config_path))
    except FileNotFoundError:
        config = {"current_level": {"num": 1, "title": "DECISIONS"}}
        json.dump(config, open(config_path, 'w'))
    lvl_num_top = config['current_level']['num']    # highest level reached
    lvl_num_active = lvl_num_top                    # level currently displayed
    lvl_title = config['current_level']['title']
    script = None                                   # path of the user's submitted script
    # User-facing error message templates, keyed by failure kind.
    error_types = {'missing_file': 'Error: No file produced at Excision/results/{}.txt.',
                   'empty_file': 'Error: There is nothing in the file you created.',
                   'bad_type': 'Error: Your result may need to be a {} not a {}.',
                   'incorrect': 'Error: Your answer is incorrect.',
                   'task_test': 'Error: The function gave an incorrect answer on a test.',
                   'no_func': 'Error: The file does not contain the correct function name.'}
    # Pickled level definitions; see the class docstring for the schema.
    lvl_data = load(open('levels.pkl', 'rb'))
    changes = change_log()
    _sections = None

    @classmethod
    def get_sections(self, lvl_num):
        """Parse the level's markdown story file and return a dict of
        HTML-marked-up sections (titles/images are left as plain text)."""
        no_markup = {'title', 'subtitle', 'img'}
        infile = 'static/story/level{}.md'.format(lvl_num)
        sec_dict = parse(infile)
        changes = change_log()
        # later keys (change log) override parsed sections
        sec_dict = {**sec_dict, **changes}
        marked_dict = {}
        for k, v in sec_dict.items():
            if k not in no_markup:
                v = self.markup_str(self, v)
            marked_dict[k] = v
        return marked_dict

    @classmethod
    def update_config(self):
        """If the user sucessfully completes a level, save the game."""
        self.lvl_num_top += 1
        infile = 'static/story/level{}.md'.format(self.lvl_num_top)
        sections = parse(infile)
        self.lvl_title = sections['title']
        self.config['current_level'] = {'num':self.lvl_num_top, 'title':self.lvl_title}
        json.dump(self.config, open(self.config_path, 'w'))

    @classmethod
    def process_request(self, func_name):
        """Execute the method corresponding to func_name."""
        # vars(self) is the class namespace; the attribute is a plain
        # function, so the class must be passed explicitly as ``self``.
        result = vars(self)[func_name](self)
        return result

    def markup_str(self, string):
        """Prepares a markdown formatted string for insertion into jinja template."""
        # NOTE(review): assumes the ``markdown`` module is imported earlier in
        # this file (import not visible here) — confirm.
        markup = Markup(markdown.markdown(string))
        return markup

    def temp_copy(self):
        """Creates a copy of a user file into the src dir to be imported"""
        new = os.path.basename(self.script)
        copyfile(self.script, new)
        return new

    def temp_del(self, temp):
        """Delete file created by temp_copy."""
        if os.path.isfile(temp):
            os.remove(temp)

    def try_running_problem(self, cmd):
        """Run *cmd* as a subprocess; return its stderr text on failure,
        else None.

        NOTE(review): assumes ``sp`` is ``subprocess`` imported earlier in
        the file — confirm.
        """
        error = None
        #TODO For loop here if we want multiple tests
        try:
            print(cmd.split())
            sp.run(cmd.split(), stderr=sp.PIPE, check=True)
        except sp.CalledProcessError as e:
            error = e.stderr.decode("utf-8")
        return error

    def check_result(self, outfile, error, ans):
        """Compare the contents of *outfile* with the expected answer *ans*;
        return an error message string or None on success."""
        with open(outfile) as outfile:
            result = outfile.read().strip()
        if not result:
            error = self.error_types['empty_file']
        if not error and result != ans:
            error = self.error_types['incorrect']
        return error

    def problem(self):
        """Run the active level's command-line problem and grade its output
        file. Returns (success, error)."""
        success = False
        error = None
        outfile = '../results/{}.txt'.format(self.lvl_num_active) #TODO edit to represent current displayed problem; temp state?
        data = self.lvl_data[self.lvl_num_active]['problem'] #TODO edit to represent current displayed problem; temp state?
        # remove any stale result file so we grade only fresh output
        if os.path.isfile(outfile):
            os.remove(outfile)
        error = self.try_running_problem(self, data['cmd'].format(self.script))
        if not os.path.isfile(outfile) and error is None:
            error = self.error_types['missing_file'].format(self.lvl_num_active) #TODO edit to represent current displayed problem; temp state?
            return success, error
        if error is None:
            error = self.check_result(self, outfile, error, data['answer'])
        success = error is None
        return success, error

    def check_task_result(self, result, ans):
        """Compare a function's return value against the expected answer,
        handling numpy arrays and pandas DataFrames specially. Returns an
        error message string or None."""
        error = None
        if isinstance(ans, np.ndarray):
            equal = np.array_equal(result, ans)
        elif isinstance(ans, pd.DataFrame):
            equal = ans.equals(result)
        else:
            equal = result == ans
        if not equal:
            error = self.error_types['task_test']
        # a type mismatch overrides the generic wrong-answer message
        if type(result) != type(ans):
            error = self.error_types['bad_type']
            error = error.format(type(ans), type(result))
        return error

    def try_running_function(self, new, data):
        """Import (or re-import) the user's module *new*, look up the required
        function, and run it against every stored test case. Returns an error
        message / traceback string, or None if all tests pass."""
        error = None
        module_name = new.split('.')[0]
        try:
            # reload if a previous submission already imported this module
            if module_name in sys.modules:
                user_import = reload(sys.modules[module_name])
            else:
                user_import = import_module(module_name)
            if data['func'] not in vars(user_import):
                error = self.error_types['no_func']
                return error
            func = vars(user_import)[data['func']]
            for test, ans in zip(data['tests'], data['answers']):
                # copy array args so the user's function cannot mutate the fixtures
                test = [p.copy() if isinstance(p, np.ndarray) else p for p in test]
                result = func(*test)
                error = self.check_task_result(self, result, ans)
                if error is not None:
                    break
        except Exception:
            # any crash in user code is reported as its traceback
            error = format_exc()
        return error

    def task_test(self, task):
        """Copy the user's script in, run the named task's tests, clean up.
        Returns (success, error)."""
        error = None
        new = self.temp_copy(self)
        data = self.lvl_data[self.lvl_num_active][task] #TODO edit to represent current displayed problem; temp state?
        error = self.try_running_function(self, new, data)
        self.temp_del(self, new)
        success = error is None
        return success, error

    def task1(self):
        """Grade task 1 of the active level."""
        return self.task_test(self, 'task1')

    def task2(self):
        """Grade task 2 of the active level."""
        return self.task_test(self, 'task2')

    def task3(self):
        """Grade task 3 of the active level."""
        return self.task_test(self, 'task3')
| mit |
sanuj/shogun | examples/undocumented/python_modular/graphical/interactive_gp_demo.py | 10 | 14176 | #
# This program is free software you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation either version 3 of the License, or
# (at your option) any later version.
#
# Written (C) 2012 Heiko Strathmann, based on interactive_svm_demo by Christian
# Widmer which itself is based on PyQT Demo by Eli Bendersky
#
"""
Shogun Gaussian processes demo based on interactive SVM demo by Christian \
Widmer and Soeren Sonnenburg which itself is based on PyQT Demo by Eli Bendersky
Work to be done on parameter (e.g. kernel width) optimization.
Heiko Strathmann/Cameron Lai
License: GPLv3
"""
import sys, os, csv
import scipy as SP
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy import *
import matplotlib
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from modshogun import *
from modshogun import *
from modshogun import *
import util
class Form(QMainWindow):
    """Main window of the interactive Shogun GP-regression demo (PyQt4).

    Holds the toy data (self.x, self.y), the matplotlib canvas, and the
    Qt widgets controlling data generation and kernel settings.
    """
    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        self.setWindowTitle('SHOGUN interactive demo')
        self.series_list_model = QStandardItemModel()
        self.create_menu()
        self.create_main_frame()
        self.create_status_bar()
        self.create_toy_data()
        self.on_show()

    def on_show(self):
        # Redraw only the raw data points (no GP fit).
        self.axes.clear()
        self.axes.plot(self.x, self.y, 'ro')
        self.axes.set_xlim((self.xmin,self.xmax))
        self.axes.set_ylim((self.ymin,self.ymax))
        self.axes.grid(True)
        self.canvas.draw()
        self.fill_series_list(self.get_stats())

    def on_about(self):
        # Show the module docstring as the about text.
        msg = __doc__
        QMessageBox.about(self, "About the demo", msg.strip())

    def fill_series_list(self, names):
        # Replace the side-panel list contents with the given strings.
        self.series_list_model.clear()
        for name in names:
            item = QStandardItem(name)
            item.setCheckState(Qt.Unchecked)
            item.setCheckable(False)
            self.series_list_model.appendRow(item)

    def onclick(self, event):
        # matplotlib mouse handler: a click on the canvas adds a data point.
        print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
        x=SP.append(self.x, event.xdata)
        self.y=SP.append(self.y, event.ydata)
        # keep x as a column vector (n, 1)
        self.x= x[:,SP.newaxis]
        self.on_show()
        self.status_text.setText("New data point: x=%f, y=%f"%(event.xdata, event.ydata))

    def create_menu(self):
        """Build the File and Help menus."""
        self.file_menu = self.menuBar().addMenu("&File")
        #load_action = self.create_action("&Load file",
        #    shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
        quit_action = self.create_action("&Quit", slot=self.close,
            shortcut="Ctrl+Q", tip="Close the application")
        #self.add_actions(self.file_menu,
        #    (load_action, None, quit_action))
        self.help_menu = self.menuBar().addMenu("&Help")
        about_action = self.create_action("&About",
            shortcut='F1', slot=self.on_about,
            tip='About the demo')
        self.add_actions(self.help_menu, (about_action,))

    def clear_data(self):
        """Drop all data points and reset the axes limits."""
        self.x=SP.array([])
        self.y=SP.array([])
        self.xmin=-5
        self.xmax=5
        self.ymin=-5
        self.ymax=5
        self.on_show()
        self.status_text.setText("Data cleared")

    def enable_widgets(self):
        """Enable/disable the width/degree inputs to match the kernel choice."""
        kernel_name = self.kernel_combo.currentText()
        if kernel_name == "Linear":
            self.sigma.setDisabled(True)
            self.degree.setDisabled(True)
        elif kernel_name == "Polynomial":
            self.sigma.setDisabled(True)
            self.degree.setEnabled(True)
        elif kernel_name == "Gaussian":
            self.sigma.setEnabled(True)
            self.degree.setDisabled(True)

    def get_stats(self):
        """Return display strings for the side-panel list."""
        num_train = len(self.x)
        str_train = "num training points: %i" % num_train
        # NOTE(review): label says "training" but reports the test-point
        # count from the nTest field — possibly a copy/paste slip.
        str_test = "num training points: %s" % self.nTest.text()
        return (str_train, str_test)

    def create_toy_data(self):
        #0. generate Toy-Data; just samples from a superposition of a sin + linear trend
        x = SP.arange(self.xmin,self.xmax,(self.xmax-self.xmin)/100.0)
        C = 2 #offset
        b = 0
        y = b*x + C + float(self.sine_amplitude.text())*SP.sin(float(self.sine_freq.text())*x)
        # dy = b + 1*SP.cos(x)
        y += float(self.noise_level.text())*random.randn(y.shape[0])
        # center the targets and keep x as a column vector
        self.y=y-y.mean()
        self.x= x[:,SP.newaxis]
        self.on_show()

    def learn_kernel_width(self):
        """Model selection over scale/sigma/width via gradient search.

        NOTE(review): this method references names (inf, likelihood, SECF,
        gp, feat_train, labels) that are not defined in this scope, so it
        would raise NameError if invoked; the button wired to it is
        commented out in create_main_frame. Left as-is pending a fix.
        """
        root=ModelSelectionParameters();
        c1=ModelSelectionParameters("inference_method", inf);
        root.append_child(c1);
        c2 = ModelSelectionParameters("scale");
        c1.append_child(c2);
        c2.build_values(0.01, 4.0, R_LINEAR);
        c3 = ModelSelectionParameters("likelihood_model", likelihood);
        c1.append_child(c3);
        c4=ModelSelectionParameters("sigma");
        c3.append_child(c4);
        c4.build_values(0.001, 4.0, R_LINEAR);
        c5 =ModelSelectionParameters("kernel", SECF);
        c1.append_child(c5);
        c6 =ModelSelectionParameters("width");
        c5.append_child(c6);
        c6.build_values(0.001, 4.0, R_LINEAR);
        crit = GradientCriterion();
        grad=GradientEvaluation(gp, feat_train, labels, crit);
        grad.set_function(inf);
        gp.print_modsel_params();
        root.print_tree();
        grad_search=GradientModelSelection(root, grad);
        grad.set_autolock(0);
        best_combination=grad_search.select_model(1);
        self.sigma.setText("1.0")
        self.plot_gp()

    def plot_gp(self):
        """Train a GP on the current data with the selected kernel and plot
        the predictive mean with a +/- 3 sigma band."""
        feat_train = RealFeatures(self.x.T)
        labels = RegressionLabels(self.y)
        #[x,y]=self.data.get_data()
        #feat_train=RealFeatures(x.T)
        #labels=RegressionLabels(y)
        n_dimensions = 1
        kernel_name = self.kernel_combo.currentText()
        print "current kernel is %s" % (kernel_name)
        #new interface with likelihood parametres being decoupled from the covaraince function
        likelihood = GaussianLikelihood()
        #covar_parms = SP.log([2])
        #hyperparams = {'covar':covar_parms,'lik':SP.log([1])}
        # construct covariance function
        width=float(self.sigma.text())
        degree=int(self.degree.text())
        if kernel_name == "Linear":
            gk = LinearKernel(feat_train, feat_train)
            gk.set_normalizer(IdentityKernelNormalizer())
        elif kernel_name == "Polynomial":
            gk = PolyKernel(feat_train, feat_train, degree, True)
            gk.set_normalizer(IdentityKernelNormalizer())
        elif kernel_name == "Gaussian":
            gk = GaussianKernel(feat_train, feat_train, width)
        #SECF = GaussianKernel(feat_train, feat_train, width)
        #covar = SECF
        zmean = ZeroMean();
        inf = ExactInferenceMethod(gk, feat_train, zmean, labels, likelihood);
        inf.get_negative_marginal_likelihood()
        # location of unispaced predictions
        x_test = array([linspace(self.xmin,self.xmax, self.nTest.text())])
        feat_test=RealFeatures(x_test)
        gp = GaussianProcessRegression(inf)
        gp.train()
        covariance = gp.get_variance_vector(feat_test)
        predictions = gp.get_mean_vector(feat_test)
        #print "x_test"
        #print feat_test.get_feature_matrix()
        #print "mean predictions"
        #print predictions.get_labels()
        #print "covariances"
        #print covariance.get_labels()
        self.status_text.setText("Negative Log Marginal Likelihood = %f"%(inf.get_negative_marginal_likelihood()))
        self.axes.clear()
        self.axes.grid(True)
        self.axes.set_xlim((self.xmin,self.xmax))
        self.axes.set_ylim((self.ymin,self.ymax))
        self.axes.hold(True)
        x_test=feat_test.get_feature_matrix()[0]
        self.axes.plot(x_test, predictions, 'b-x')
        #self.axes.plot(x_test, labels.get_labels(), 'ro')
        self.axes.plot(self.x, self.y, 'ro')
        #self.axes.plot(feat_test.get_feature_matrix()[0], predictions.get_labels()-3*sqrt(covariance.get_labels()))
        #self.axes.plot(feat_test.get_feature_matrix()[0], predictions.get_labels()+3*sqrt(covariance.get_labels()))
        # 3-sigma predictive band
        upper = predictions+3*sqrt(covariance)
        lower = predictions-3*sqrt(covariance)
        self.axes.fill_between(x_test, lower, upper, color='grey')
        self.axes.hold(False)
        self.canvas.draw()
        self.fill_series_list(self.get_stats())

    def create_main_frame(self):
        """Build the central widget: canvas, kernel/data controls, buttons."""
        self.xmin=-5
        self.xmax=5
        self.ymin=-5
        self.ymax=5
        self.main_frame = QWidget()
        plot_frame = QWidget()
        self.dpi = 100
        self.fig = Figure((6.0, 6.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # clicking on the canvas adds data points (see onclick)
        cid = self.canvas.mpl_connect('button_press_event', self.onclick)
        self.axes = self.fig.add_subplot(111)
        self.cax = None
        #self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        self.kernel_combo = QComboBox()
        self.kernel_combo.insertItem(-1, "Gaussian")
        self.kernel_combo.insertItem(-1, "Polynomial")
        self.kernel_combo.insertItem(-1, "Linear")
        self.kernel_combo.maximumSize = QSize(300, 50)
        self.connect(self.kernel_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
        log_label = QLabel("Data points")
        self.series_list_view = QListView()
        self.series_list_view.setModel(self.series_list_model)
        # text inputs for the toy-data generator and kernel parameters
        self.sine_freq = QLineEdit()
        self.sine_freq.setText("1.0")
        self.sine_amplitude = QLineEdit()
        self.sine_amplitude.setText("1.0")
        self.sigma = QLineEdit()
        self.sigma.setText("1.2")
        self.degree = QLineEdit()
        self.degree.setText("2")
        self.noise_level = QLineEdit()
        self.noise_level.setText("1")
        self.nTest = QLineEdit()
        self.nTest.setText("100")
        spins_hbox = QHBoxLayout()
        spins_hbox.addWidget(QLabel('Sine data setting: '))
        spins_hbox.addWidget(QLabel('Sine Freq.'))
        spins_hbox.addWidget(self.sine_freq)
        spins_hbox.addWidget(QLabel('Sine Amplitude'))
        spins_hbox.addWidget(self.sine_amplitude)
        spins_hbox.addWidget(QLabel('Noise Level'))
        spins_hbox.addWidget(self.noise_level)
        spins_hbox.addStretch(1)
        spins_hbox2 = QHBoxLayout()
        spins_hbox2.addWidget(QLabel('Kernel Setting: '))
        spins_hbox2.addWidget(QLabel('Type'))
        spins_hbox2.addWidget(self.kernel_combo)
        spins_hbox2.addWidget(QLabel("Width"))
        spins_hbox2.addWidget(self.sigma)
        spins_hbox2.addWidget(QLabel("Degree"))
        spins_hbox2.addWidget(self.degree)
        spins_hbox2.addStretch(1)
        spins_hbox3 = QHBoxLayout()
        spins_hbox3.addWidget(QLabel('Test Setting: '))
        spins_hbox3.addWidget(QLabel('Number of test points'))
        spins_hbox3.addWidget(self.nTest)
        spins_hbox3.addStretch(1)
        self.show_button = QPushButton("&Train GP")
        self.connect(self.show_button, SIGNAL('clicked()'), self.plot_gp)
        self.gen_sine_data_button = QPushButton("&Generate Sine Data")
        self.connect(self.gen_sine_data_button, SIGNAL('clicked()'), self.create_toy_data)
        self.clear_data_button = QPushButton("&Clear")
        self.connect(self.clear_data_button, SIGNAL('clicked()'), self.clear_data)
        self.learn_kernel_button = QPushButton("&Learn Kernel Width and train GP")
        self.connect(self.learn_kernel_button, SIGNAL('clicked()'), self.learn_kernel_width)
        left_vbox = QVBoxLayout()
        left_vbox.addWidget(self.canvas)
        #left_vbox.addWidget(self.mpl_toolbar)
        right0_vbox = QVBoxLayout()
        right0_vbox.addWidget(QLabel("Data Points"))
        right0_vbox.addWidget(self.series_list_view)
        #right0_vbox.addWidget(self.legend_cb)
        right0_vbox.addStretch(1)
        right2_vbox = QVBoxLayout()
        right2_vbox.addWidget(QLabel("Settings"))
        right2_vbox.addWidget(self.gen_sine_data_button)
        right2_vbox.addWidget(self.clear_data_button)
        right2_vbox.addWidget(self.show_button)
        #right2_vbox.addWidget(self.learn_kernel_button)
        right2_vbox.addLayout(spins_hbox)
        right2_vbox.addLayout(spins_hbox2)
        right2_vbox.addLayout(spins_hbox3)
        right2_vbox.addStretch(1)
        right_vbox = QHBoxLayout()
        right_vbox.addLayout(right0_vbox)
        right_vbox.addLayout(right2_vbox)
        hbox = QVBoxLayout()
        hbox.addLayout(left_vbox)
        hbox.addLayout(right_vbox)
        self.main_frame.setLayout(hbox)
        self.setCentralWidget(self.main_frame)
        self.enable_widgets()

    def create_status_bar(self):
        self.status_text = QLabel("")
        self.statusBar().addWidget(self.status_text, 1)

    def add_actions(self, target, actions):
        # None entries act as menu separators.
        for action in actions:
            if action is None:
                target.addSeparator()
            else:
                target.addAction(action)

    def create_action( self, text, slot=None, shortcut=None,
                        icon=None, tip=None, checkable=False,
                        signal="triggered()"):
        """Build a QAction, wiring up icon/shortcut/tooltip/slot as given."""
        action = QAction(text, self)
        if icon is not None:
            action.setIcon(QIcon(":/%s.png" % icon))
        if shortcut is not None:
            action.setShortcut(shortcut)
        if tip is not None:
            action.setToolTip(tip)
            action.setStatusTip(tip)
        if slot is not None:
            self.connect(action, SIGNAL(signal), slot)
        if checkable:
            action.setCheckable(True)
        return action
def main():
    """Create the Qt application, show the demo window, and run the event loop."""
    application = QApplication(sys.argv)
    window = Form()
    window.show()
    application.exec_()
# Launch the demo only when run as a script (not on import).
if __name__ == "__main__":
    main()
| gpl-3.0 |
devanshdalal/scikit-learn | examples/covariance/plot_outlier_detection.py | 36 | 5023 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates three
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)

rng = np.random.RandomState(42)

# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]

# define two outlier detection tools to be compared
classifiers = {
    "One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
                                     kernel="rbf", gamma=0.1),
    "Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
    "Isolation Forest": IsolationForest(max_samples=n_samples,
                                        contamination=outliers_fraction,
                                        random_state=rng),
    "Local Outlier Factor": LocalOutlierFactor(
        n_neighbors=35,
        contamination=outliers_fraction)}

# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
# ground truth: first n_inliers points are inliers (+1), the rest outliers (-1)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1

# Fit the problem with varying cluster separation
# NOTE(review): the inner loop below reuses the name ``i``, shadowing this
# loop variable; harmless here (the outer for rebinds i each iteration) but
# confusing — consider renaming.
for i, offset in enumerate(clusters_separation):
    np.random.seed(42)
    # Data generation: two Gaussian blobs at +/- offset
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
    X = np.r_[X1, X2]
    # Add outliers drawn uniformly over the plotting window
    X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]

    # Fit the model: one subplot per detector
    plt.figure(figsize=(9, 7))
    for i, (clf_name, clf) in enumerate(classifiers.items()):
        # fit the data and tag outliers
        if clf_name == "Local Outlier Factor":
            y_pred = clf.fit_predict(X)
            scores_pred = clf.negative_outlier_factor_
        else:
            clf.fit(X)
            scores_pred = clf.decision_function(X)
            y_pred = clf.predict(X)
        # threshold the decision scores at the known contamination level
        threshold = stats.scoreatpercentile(scores_pred,
                                            100 * outliers_fraction)
        n_errors = (y_pred != ground_truth).sum()
        # plot the levels lines and the points
        if clf_name == "Local Outlier Factor":
            # decision_function is private for LOF
            Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        subplot = plt.subplot(2, 2, i + 1)
        subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
                         cmap=plt.cm.Blues_r)
        a = subplot.contour(xx, yy, Z, levels=[threshold],
                            linewidths=2, colors='red')
        subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
                         colors='orange')
        b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
        c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
        subplot.axis('tight')
        subplot.legend(
            [a.collections[0], b, c],
            ['learned decision function', 'true inliers', 'true outliers'],
            prop=matplotlib.font_manager.FontProperties(size=10),
            loc='lower right')
        subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
        subplot.set_xlim((-7, 7))
        subplot.set_ylim((-7, 7))
    plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
    plt.suptitle("Outlier detection")

plt.show()
| bsd-3-clause |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/io/tests/test_pytables.py | 9 | 194218 | import nose
import sys
import os
import warnings
import tempfile
from contextlib import contextmanager
import datetime
import numpy as np
import pandas
import pandas as pd
from pandas import (Series, DataFrame, Panel, MultiIndex, Categorical, bdate_range,
date_range, timedelta_range, Index, DatetimeIndex, TimedeltaIndex, isnull)
from pandas.compat import is_platform_windows, PY3
from pandas.io.pytables import _tables, TableIterator
try:
_tables()
except ImportError as e:
raise nose.SkipTest(e)
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
IncompatibilityWarning, PerformanceWarning,
AttributeConflictWarning, DuplicateWarning,
PossibleDataLossError, ClosedFileError)
from pandas.io import pytables as pytables
import pandas.util.testing as tm
from pandas.util.testing import (assert_panel4d_equal,
assert_panel_equal,
assert_frame_equal,
assert_series_equal)
from pandas import concat, Timestamp
from pandas import compat
from pandas.compat import range, lrange, u
from pandas.util.testing import assert_produces_warning
from numpy.testing.decorators import slow
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
# Default HDF compressor: blosc when PyTables >= 2.2, zlib otherwise.
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
    and 'blosc' or 'zlib'

# nose flag: these tests must not be split across processes.
_multiprocess_can_split_ = False

# testing on windows/py3 seems to fault
# for using compression
skip_compression = PY3 and is_platform_windows()
# contextmanager to ensure the file cleanup
def safe_remove(path):
    """Best-effort removal of *path*.

    A missing file or any other OS-level failure is silently ignored, so
    test teardown never masks the real test outcome. ``None`` is a no-op.
    """
    if path is not None:
        try:
            os.remove(path)
        except OSError:
            # narrowed from a bare ``except:`` — only filesystem errors are
            # expected here; KeyboardInterrupt/SystemExit must propagate
            pass
def safe_close(store):
    """Best-effort close of an HDFStore-like object.

    ``None`` is a no-op; any exception raised by ``close()`` (e.g. on an
    already-closed or broken store) is swallowed so teardown cannot fail
    the test. Narrowed from a bare ``except:`` so KeyboardInterrupt and
    SystemExit still propagate.
    """
    try:
        if store is not None:
            store.close()
    except Exception:
        pass
def create_tempfile(path):
    """ create an unopened named temporary file """
    tmp_dir = tempfile.gettempdir()
    return os.path.join(tmp_dir, path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
                       fletcher32=False):
    """Yield an :class:`HDFStore` at *path*, closing it (and, for write
    modes, removing the file) on exit.

    Parameters
    ----------
    path : str
        Target file; a bare filename is placed in the system temp dir.
    mode, complevel, complib, fletcher32
        Passed through to the :class:`HDFStore` constructor.
    """
    store = None  # guard: HDFStore() may raise before assignment
    try:
        # put in the temporary path if we don't have one already
        if not len(os.path.dirname(path)):
            path = create_tempfile(path)

        # BUGFIX: fletcher32 was previously hard-coded to False, silently
        # ignoring the caller's argument
        store = HDFStore(path, mode=mode, complevel=complevel,
                         complib=complib, fletcher32=fletcher32)
        yield store
    finally:
        safe_close(store)
        if mode == 'w' or mode == 'a':
            safe_remove(path)
@contextmanager
def ensure_clean_path(path):
    """
    return essentially a named temporary file that is not opened
    and deleted on existing; if path is a list, then create and
    return list of filenames
    """
    wants_list = isinstance(path, list)
    names = path if wants_list else [path]
    filenames = [create_tempfile(name) for name in names]
    try:
        yield filenames if wants_list else filenames[0]
    finally:
        for name in filenames:
            safe_remove(name)
# set these parameters so we don't have file sharing
# (force single-threaded numexpr/blosc so concurrent tests cannot contend
# on the same HDF5 file handles)
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
def compat_assert_produces_warning(w, f):
    """Invoke *f*; on Python 2 additionally assert that warning *w* fires
    (the warning check is skipped entirely under PY3)."""
    if compat.PY3:
        f()
        return
    with tm.assert_produces_warning(expected_warning=w):
        f()
class Base(tm.TestCase):
    """Shared fixture base for the pytables test classes: toggles pandas
    testing mode around the suite and allocates a unique temp filename
    per test."""

    @classmethod
    def setUpClass(cls):
        super(Base, cls).setUpClass()

        # Pytables 3.0.0 deprecates lots of things
        tm.reset_testing_mode()

    @classmethod
    def tearDownClass(cls):
        super(Base, cls).tearDownClass()

        # Pytables 3.0.0 deprecates lots of things
        tm.set_testing_mode()

    def setUp(self):
        warnings.filterwarnings(action='ignore', category=FutureWarning)
        # random suffix keeps concurrent/stale test files from colliding
        self.path = 'tmp.__%s__.h5' % tm.rands(10)

    def tearDown(self):
        pass
class TestHDFStore(Base, tm.TestCase):
def test_factory_fun(self):
path = create_tempfile(self.path)
try:
with get_store(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with get_store(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
def test_context(self):
path = create_tempfile(self.path)
try:
with HDFStore(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with HDFStore(path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(path)
def test_conv_read_write(self):
path = create_tempfile(self.path)
try:
def roundtrip(key, obj,**kwargs):
obj.to_hdf(path, key,**kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series',o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series',o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame',o))
o = tm.makePanel()
assert_panel_equal(o, roundtrip('panel',o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(path,'table',append=True)
result = read_hdf(path, 'table', where = ['index>2'])
assert_frame_equal(df[df.index>2],result)
finally:
safe_remove(path)
def test_long_strings(self):
# GH6166
# unconversion of long strings was being chopped in earlier
# versions of numpy < 1.7.2
df = DataFrame({'a': tm.rands_array(100, size=10)},
index=tm.rands_array(100, size=10))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['a'])
result = store.select('df')
assert_frame_equal(df, result)
def test_api(self):
# GH4584
# API issue when to_hdf doesn't acdept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True)
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True)
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',append=False,format='fixed')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False,format='f')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False)
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=True,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
# append to False
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
# formats
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format=None)
assert_frame_equal(store.select('df'),df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='f')
self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='fixed')
self.assertRaises(TypeError, df.to_hdf, path,'df',append=True,format='foo')
self.assertRaises(TypeError, df.to_hdf, path,'df',append=False,format='bar')
#File path doesn't exist
path = ""
self.assertRaises(IOError, read_hdf, path, 'df')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format','fixed')
_maybe_remove(store,'df')
store.put('df',df)
self.assertFalse(store.get_storer('df').is_table)
self.assertRaises(ValueError, store.append, 'df2',df)
pandas.set_option('io.hdf.default_format','table')
_maybe_remove(store,'df')
store.put('df',df)
self.assertTrue(store.get_storer('df').is_table)
_maybe_remove(store,'df2')
store.append('df2',df)
self.assertTrue(store.get_storer('df').is_table)
pandas.set_option('io.hdf.default_format',None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format','fixed')
df.to_hdf(path,'df')
with get_store(path) as store:
self.assertFalse(store.get_storer('df').is_table)
self.assertRaises(ValueError, df.to_hdf, path,'df2', append=True)
pandas.set_option('io.hdf.default_format','table')
df.to_hdf(path,'df3')
with HDFStore(path) as store:
self.assertTrue(store.get_storer('df3').is_table)
df.to_hdf(path,'df4',append=True)
with HDFStore(path) as store:
self.assertTrue(store.get_storer('df4').is_table)
pandas.set_option('io.hdf.default_format',None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
self.assertEqual(len(store), 5)
self.assertTrue(set(
store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
store.append('e', tm.makePanel())
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001,1,2,0,0)
df['datetime2'] = datetime.datetime(2001,1,3,0,0)
df.ix[3:6,['obj1']] = np.nan
df = df.consolidate()._convert(datetime=True)
warnings.filterwarnings('ignore', category=PerformanceWarning)
store['df'] = df
warnings.filterwarnings('always', category=PerformanceWarning)
# make a random group in hdf space
store._handle.create_group(store._handle.root,'bah')
repr(store)
str(store)
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df',df)
s = store.get_storer('df')
repr(s)
str(s)
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
self.assertIn('a', store)
self.assertIn('b', store)
self.assertNotIn('c', store)
self.assertIn('foo/bar', store)
self.assertIn('/foo/bar', store)
self.assertNotIn('/foo/b', store)
self.assertNotIn('bar', store)
# GH 2694
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
store['node())'] = tm.makeDataFrame()
self.assertIn('node())', store)
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
self.assertEqual(store.root.a._v_attrs.pandas_version, '0.15.2')
self.assertEqual(store.root.b._v_attrs.pandas_version, '0.15.2')
self.assertEqual(store.root.df1._v_attrs.pandas_version, '0.15.2')
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but no version
# info
store.get_node('df2')._v_attrs.pandas_version = None
self.assertRaises(Exception, store.select, 'df2')
def test_mode(self):
    """Open-mode semantics for HDFStore / to_hdf / read_hdf.

    'r' and 'r+' on a non-existent file must raise IOError; 'w' truncates;
    read_hdf with mode='w' raises KeyError.
    """
    df = tm.makeTimeDataFrame()

    def check(mode):
        # exercise one mode through constructor, context manager and
        # the to_hdf/read_hdf convenience paths
        with ensure_clean_path(self.path) as path:

            # constructor
            if mode in ['r','r+']:
                self.assertRaises(IOError, HDFStore, path, mode=mode)
            else:
                store = HDFStore(path,mode=mode)
                self.assertEqual(store._handle.mode, mode)
                store.close()

        with ensure_clean_path(self.path) as path:

            # context
            if mode in ['r','r+']:
                def f():
                    with HDFStore(path,mode=mode) as store:
                        pass
                self.assertRaises(IOError, f)
            else:
                with HDFStore(path,mode=mode) as store:
                    self.assertEqual(store._handle.mode, mode)

        with ensure_clean_path(self.path) as path:

            # conv write
            if mode in ['r','r+']:
                self.assertRaises(IOError, df.to_hdf, path, 'df', mode=mode)
                df.to_hdf(path,'df',mode='w')
            else:
                df.to_hdf(path,'df',mode=mode)

            # conv read
            if mode in ['w']:
                self.assertRaises(KeyError, read_hdf, path, 'df', mode=mode)
            else:
                result = read_hdf(path,'df',mode=mode)
                assert_frame_equal(result,df)

    check('r')
    check('r+')
    check('a')
    check('w')
def test_reopen_handle(self):
    """Reopening a store handle: mode changes, truncation and is_open state."""
    with ensure_clean_path(self.path) as path:

        store = HDFStore(path,mode='a')
        store['a'] = tm.makeTimeSeries()

        # invalid mode change
        self.assertRaises(PossibleDataLossError, store.open, 'w')
        store.close()
        self.assertFalse(store.is_open)

        # truncation ok here
        store.open('w')
        self.assertTrue(store.is_open)
        self.assertEqual(len(store), 0)
        store.close()
        self.assertFalse(store.is_open)

        store = HDFStore(path,mode='a')
        store['a'] = tm.makeTimeSeries()

        # reopen as read
        store.open('r')
        self.assertTrue(store.is_open)
        self.assertEqual(len(store), 1)
        self.assertEqual(store._mode, 'r')
        store.close()
        self.assertFalse(store.is_open)

        # reopen as append
        store.open('a')
        self.assertTrue(store.is_open)
        self.assertEqual(len(store), 1)
        self.assertEqual(store._mode, 'a')
        store.close()
        self.assertFalse(store.is_open)

        # reopen as append (again)
        store.open('a')
        self.assertTrue(store.is_open)
        self.assertEqual(len(store), 1)
        self.assertEqual(store._mode, 'a')
        store.close()
        self.assertFalse(store.is_open)
def test_open_args(self):
    """Extra open kwargs are forwarded to PyTables (in-memory CORE driver)."""
    with ensure_clean_path(self.path) as path:

        df = tm.makeDataFrame()

        # create an in memory store
        store = HDFStore(path,mode='a',driver='H5FD_CORE',driver_core_backing_store=0)
        store['df'] = df
        store.append('df2',df)

        tm.assert_frame_equal(store['df'],df)
        tm.assert_frame_equal(store['df2'],df)

        store.close()

        # the file should not have actually been written
        self.assertFalse(os.path.exists(path))
def test_flush(self):
    """flush() works both with and without an OS-level fsync."""
    with ensure_clean_store(self.path) as store:
        store['a'] = tm.makeTimeSeries()
        store.flush()
        store.flush(fsync=True)
def test_get(self):
    """get() matches __getitem__ for plain and slash-prefixed keys;
    a missing key raises KeyError."""
    with ensure_clean_store(self.path) as store:
        store['a'] = tm.makeTimeSeries()
        left = store.get('a')
        right = store['a']
        tm.assert_series_equal(left, right)

        left = store.get('/a')
        right = store['/a']
        tm.assert_series_equal(left, right)

        self.assertRaises(KeyError, store.get, 'b')
def test_getattr(self):
    """Stored keys are reachable as attributes; reserved names are not."""
    with ensure_clean_store(self.path) as store:

        s = tm.makeTimeSeries()
        store['a'] = s

        # test attribute access
        result = store.a
        tm.assert_series_equal(result, s)
        result = getattr(store,'a')
        tm.assert_series_equal(result, s)

        df = tm.makeTimeDataFrame()
        store['df'] = df
        result = store.df
        tm.assert_frame_equal(result, df)

        # errors
        self.assertRaises(AttributeError, getattr, store, 'd')
        for x in ['mode','path','handle','complib']:
            self.assertRaises(AttributeError, getattr, store, x)

        # not stored keys but real (underscore-prefixed) store attributes
        for x in ['mode','path','handle','complib']:
            getattr(store,"_%s" % x)
def test_put(self):
    """put() semantics: fixed vs. table format and append restrictions."""
    with ensure_clean_store(self.path) as store:

        ts = tm.makeTimeSeries()
        df = tm.makeTimeDataFrame()
        store['a'] = ts
        store['b'] = df[:10]
        store['foo/bar/bah'] = df[:10]
        store['foo'] = df[:10]
        store['/foo'] = df[:10]
        store.put('c', df[:10], format='table')

        # not OK, not a table
        self.assertRaises(
            ValueError, store.put, 'b', df[10:], append=True)

        # node does not currently exist, test _is_table_type returns False
        # in this case
        # _maybe_remove(store, 'f')
        # self.assertRaises(ValueError, store.put, 'f', df[10:], append=True)

        # can't put to a table (use append instead)
        self.assertRaises(ValueError, store.put, 'c', df[10:], append=True)

        # overwrite table
        store.put('c', df[:10], format='table', append=False)
        tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
    """Round-trip Series/DataFrames whose index holds long (and
    mixed-length) strings."""
    with ensure_clean_store(self.path) as store:

        index = Index(
            ["I am a very long string index: %s" % i for i in range(20)])
        s = Series(np.arange(20), index=index)
        df = DataFrame({'A': s, 'B': s})

        store['a'] = s
        tm.assert_series_equal(store['a'], s)

        store['b'] = df
        tm.assert_frame_equal(store['b'], df)

        # mixed length
        index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + ["I am a very long string index: %s" % i for i in range(20)])
        s = Series(np.arange(21), index=index)
        df = DataFrame({'A': s, 'B': s})
        store['a'] = s
        tm.assert_series_equal(store['a'], s)

        store['b'] = df
        tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
    """zlib compression works for table format but not for fixed format."""
    with ensure_clean_store(self.path) as store:
        df = tm.makeTimeDataFrame()

        store.put('c', df, format='table', complib='zlib')
        tm.assert_frame_equal(store['c'], df)

        # can't compress if format='fixed'
        self.assertRaises(ValueError, store.put, 'b', df,
                          format='fixed', complib='zlib')
def test_put_compression_blosc(self):
    """Same compression check as zlib, but for the blosc compressor."""
    tm.skip_if_no_package('tables', '2.2', app='blosc support')
    if skip_compression:
        raise nose.SkipTest("skipping on windows/PY3")

    df = tm.makeTimeDataFrame()

    with ensure_clean_store(self.path) as store:

        # can't compress if format='fixed'
        self.assertRaises(ValueError, store.put, 'b', df,
                          format='fixed', complib='blosc')

        store.put('c', df, format='table', complib='blosc')
        tm.assert_frame_equal(store['c'], df)
def test_put_integer(self):
    """Round-trip a frame with a plain integer (non-date, non-string) index."""
    # non-date, non-string index
    df = DataFrame(np.random.randn(50, 100))
    self._check_roundtrip(df, tm.assert_frame_equal)
def test_put_mixed_type(self):
    """put() round-trips a frame that mixes object/bool/int/Timestamp/
    datetime columns (with some NaNs)."""
    df = tm.makeTimeDataFrame()
    df['obj1'] = 'foo'
    df['obj2'] = 'bar'
    df['bool1'] = df['A'] > 0
    df['bool2'] = df['B'] > 0
    df['bool3'] = True
    df['int1'] = 1
    df['int2'] = 2
    df['timestamp1'] = Timestamp('20010102')
    df['timestamp2'] = Timestamp('20010103')
    df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
    df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
    df.ix[3:6, ['obj1']] = np.nan
    df = df.consolidate()._convert(datetime=True)

    with ensure_clean_store(self.path) as store:
        _maybe_remove(store, 'df')

        # cannot use assert_produces_warning here for some reason
        # a PendingDeprecationWarning is also raised?
        warnings.filterwarnings('ignore', category=PerformanceWarning)
        store.put('df',df)
        warnings.filterwarnings('always', category=PerformanceWarning)

        expected = store.get('df')
        tm.assert_frame_equal(expected,df)
def test_append(self):
    """append() builds tables incrementally for frames, panels and 4D
    panels, including custom index axes, mixed-type object columns and
    unsigned-int dtypes."""
    with ensure_clean_store(self.path) as store:
        df = tm.makeTimeDataFrame()
        _maybe_remove(store, 'df1')
        store.append('df1', df[:10])
        store.append('df1', df[10:])
        tm.assert_frame_equal(store['df1'], df)

        _maybe_remove(store, 'df2')
        store.put('df2', df[:10], format='table')
        store.append('df2', df[10:])
        tm.assert_frame_equal(store['df2'], df)

        _maybe_remove(store, 'df3')
        store.append('/df3', df[:10])
        store.append('/df3', df[10:])
        tm.assert_frame_equal(store['df3'], df)

        # this is allowed, but you almost always don't want to do it
        with tm.assert_produces_warning(expected_warning=tables.NaturalNameWarning):
            _maybe_remove(store, '/df3 foo')
            store.append('/df3 foo', df[:10])
            store.append('/df3 foo', df[10:])
            tm.assert_frame_equal(store['df3 foo'], df)

        # panel
        wp = tm.makePanel()
        _maybe_remove(store, 'wp1')
        store.append('wp1', wp.ix[:, :10, :])
        store.append('wp1', wp.ix[:, 10:, :])
        assert_panel_equal(store['wp1'], wp)

        # ndim
        p4d = tm.makePanel4D()
        _maybe_remove(store, 'p4d')
        store.append('p4d', p4d.ix[:, :, :10, :])
        store.append('p4d', p4d.ix[:, :, 10:, :])
        assert_panel4d_equal(store['p4d'], p4d)

        # test using axis labels
        _maybe_remove(store, 'p4d')
        store.append('p4d', p4d.ix[:, :, :10, :], axes=[
            'items', 'major_axis', 'minor_axis'])
        store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
            'items', 'major_axis', 'minor_axis'])
        assert_panel4d_equal(store['p4d'], p4d)

        # test using a different number of items on each axis
        p4d2 = p4d.copy()
        p4d2['l4'] = p4d['l1']
        p4d2['l5'] = p4d['l1']
        _maybe_remove(store, 'p4d2')
        store.append(
            'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])
        assert_panel4d_equal(store['p4d2'], p4d2)

        # test using a different order of items on the non-index axes
        _maybe_remove(store, 'wp1')
        wp_append1 = wp.ix[:, :10, :]
        store.append('wp1', wp_append1)
        wp_append2 = wp.ix[:, 10:, :].reindex(items=wp.items[::-1])
        store.append('wp1', wp_append2)
        assert_panel_equal(store['wp1'], wp)

        # dtype issues - mixed type in a single object column
        df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
        df['mixed_column'] = 'testing'
        df.ix[2, 'mixed_column'] = np.nan
        _maybe_remove(store, 'df')
        store.append('df', df)
        tm.assert_frame_equal(store['df'], df)

        # uints - test storage of uints
        uint_data = DataFrame({'u08' : Series(np.random.random_integers(0, high=255, size=5), dtype=np.uint8),
                               'u16' : Series(np.random.random_integers(0, high=65535, size=5), dtype=np.uint16),
                               'u32' : Series(np.random.random_integers(0, high=2**30, size=5), dtype=np.uint32),
                               'u64' : Series([2**58, 2**59, 2**60, 2**61, 2**62], dtype=np.uint64)},
                              index=np.arange(5))
        _maybe_remove(store, 'uints')
        store.append('uints', uint_data)
        tm.assert_frame_equal(store['uints'], uint_data)

        # uints - test storage of uints in indexable columns
        _maybe_remove(store, 'uints')
        store.append('uints', uint_data, data_columns=['u08','u16','u32'])  # 64-bit indices not yet supported
        tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
    """append() for Series: name preservation, value/index selection via
    Term, and MultiIndex series."""
    with ensure_clean_store(self.path) as store:

        # basic
        ss = tm.makeStringSeries()
        ts = tm.makeTimeSeries()
        ns = Series(np.arange(100))

        store.append('ss', ss)
        result = store['ss']
        tm.assert_series_equal(result, ss)
        self.assertIsNone(result.name)

        store.append('ts', ts)
        result = store['ts']
        tm.assert_series_equal(result, ts)
        self.assertIsNone(result.name)

        ns.name = 'foo'
        store.append('ns', ns)
        result = store['ns']
        tm.assert_series_equal(result, ns)
        self.assertEqual(result.name, ns.name)

        # select on the values
        expected = ns[ns>60]
        result = store.select('ns',Term('foo>60'))
        tm.assert_series_equal(result,expected)

        # select on the index and values
        expected = ns[(ns>70) & (ns.index<90)]
        result = store.select('ns',[Term('foo>70'), Term('index<90')])
        tm.assert_series_equal(result,expected)

        # multi-index
        mi = DataFrame(np.random.randn(5,1),columns=['A'])
        mi['B'] = np.arange(len(mi))
        mi['C'] = 'foo'
        mi.loc[3:5,'C'] = 'bar'
        mi.set_index(['C','B'],inplace=True)
        s = mi.stack()
        s.index = s.index.droplevel(2)
        store.append('mi', s)
        tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
    """GH5386: round-trip frames under a variety of index types for both
    'table' and 'fixed' formats."""
    # GH5386
    # test storing various index types
    with ensure_clean_store(self.path) as store:

        def check(format,index):
            # build a frame with the given index factory and round-trip it
            df = DataFrame(np.random.randn(10,2),columns=list('AB'))
            df.index = index(len(df))
            _maybe_remove(store, 'df')
            store.put('df',df,format=format)
            assert_frame_equal(df,store['df'])

        for index in [ tm.makeFloatIndex, tm.makeStringIndex, tm.makeIntIndex,
                       tm.makeDateIndex ]:
            check('table',index)
            check('fixed',index)

        # period index currently broken for table
        # see GH7796 FIXME
        check('fixed',tm.makePeriodIndex)
        #check('table',tm.makePeriodIndex)

        # unicode
        index = tm.makeUnicodeIndex
        if compat.PY3:
            check('table',index)
            check('fixed',index)
        else:
            # only support for fixed types (and they have a perf warning)
            self.assertRaises(TypeError, check, 'table', index)
            with tm.assert_produces_warning(expected_warning=PerformanceWarning):
                check('fixed',index)
def test_encoding(self):
    """Storing and selecting with an explicit (ascii) encoding."""
    if sys.byteorder != 'little':
        raise nose.SkipTest('system byteorder is not little')

    with ensure_clean_store(self.path) as store:
        df = DataFrame(dict(A='foo',B='bar'),index=range(5))
        df.loc[2,'A'] = np.nan
        df.loc[3,'B'] = np.nan
        _maybe_remove(store, 'df')
        store.append('df', df, encoding='ascii')
        tm.assert_frame_equal(store['df'], df)

        expected = df.reindex(columns=['A'])
        result = store.select('df',Term('columns=A',encoding='ascii'))
        tm.assert_frame_equal(result,expected)
def test_latin_encoding(self):
    """Round-trip latin-1-encodable strings (and NaN representations)
    through to_hdf/read_hdf, for both object and category dtypes."""
    if compat.PY2:
        self.assertRaisesRegexp(TypeError, '\[unicode\] is not implemented as a table column')
        return

    values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
              [b'E\xc9, 17', b'a', b'b', b'c'],
              [b'EE, 17', b'', b'a', b'b', b'c'],
              [b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
              [b'', b'a', b'b', b'c'],
              [b'\xf8\xfc', b'a', b'b', b'c'],
              [b'A\xf8\xfc', b'', b'a', b'b', b'c'],
              [np.nan, b'', b'b', b'c'],
              [b'A\xf8\xfc', np.nan, b'', b'b', b'c']]

    def _try_decode(x, encoding='latin-1'):
        # decode bytes where possible; non-bytes (e.g. NaN) pass through
        try:
            return x.decode(encoding)
        except AttributeError:
            return x

    # not sure how to remove latin-1 from code in python 2 and 3
    values = [[_try_decode(x) for x in y] for y in values]

    examples = []
    for dtype in ['category', object]:
        for val in values:
            examples.append(pandas.Series(val, dtype=dtype))

    def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
        # write then read back, mapping nan_rep back to NaN before comparing
        with ensure_clean_path(self.path) as store:
            s.to_hdf(store, key, format='table', encoding=encoding,
                     nan_rep=nan_rep)
            retr = read_hdf(store, key)
            s_nan = s.replace(nan_rep, np.nan)
            assert_series_equal(s_nan, retr)

    for s in examples:
        roundtrip(s)

    # fails:
    # for x in examples:
    #     roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self):
    """Appending frames where individual rows/columns contain NaN."""
    with ensure_clean_store(self.path) as store:
        df = DataFrame({'A' : Series(np.random.randn(20)).astype('int32'),
                        'A1' : np.random.randn(20),
                        'A2' : np.random.randn(20),
                        'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
                       index=np.arange(20))
        # some nans
        _maybe_remove(store, 'df1')
        df.ix[0:15,['A1','B','D','E']] = np.nan
        store.append('df1', df[:10])
        store.append('df1', df[10:])
        tm.assert_frame_equal(store['df1'], df)

        # first column
        df1 = df.copy()
        df1.ix[:,'A1'] = np.nan
        _maybe_remove(store, 'df1')
        store.append('df1', df1[:10])
        store.append('df1', df1[10:])
        tm.assert_frame_equal(store['df1'], df1)

        # 2nd column
        df2 = df.copy()
        df2.ix[:,'A2'] = np.nan
        _maybe_remove(store, 'df2')
        store.append('df2', df2[:10])
        store.append('df2', df2[10:])
        tm.assert_frame_equal(store['df2'], df2)

        # datetimes
        df3 = df.copy()
        df3.ix[:,'E'] = np.nan
        _maybe_remove(store, 'df3')
        store.append('df3', df3[:10])
        store.append('df3', df3[10:])
        tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
    """dropna semantics when appending frames/panels that contain all-NaN
    rows, including the io.hdf.dropna_table option and GH 9382 defaults."""
    with ensure_clean_store(self.path) as store:

        df = DataFrame({'A1' : np.random.randn(20),
                        'A2' : np.random.randn(20)},
                       index=np.arange(20))
        df.ix[0:15,:] = np.nan

        # nan some entire rows (dropna=True)
        _maybe_remove(store, 'df')
        store.append('df', df[:10], dropna=True)
        store.append('df', df[10:], dropna=True)
        tm.assert_frame_equal(store['df'], df[-4:])

        # nan some entire rows (dropna=False)
        _maybe_remove(store, 'df2')
        store.append('df2', df[:10], dropna=False)
        store.append('df2', df[10:], dropna=False)
        tm.assert_frame_equal(store['df2'], df)

        # tests the option io.hdf.dropna_table
        pandas.set_option('io.hdf.dropna_table',False)
        _maybe_remove(store, 'df3')
        store.append('df3', df[:10])
        store.append('df3', df[10:])
        tm.assert_frame_equal(store['df3'], df)

        pandas.set_option('io.hdf.dropna_table',True)
        _maybe_remove(store, 'df4')
        store.append('df4', df[:10])
        store.append('df4', df[10:])
        tm.assert_frame_equal(store['df4'], df[-4:])

        # nan some entire rows (string are still written!)
        df = DataFrame({'A1' : np.random.randn(20),
                        'A2' : np.random.randn(20),
                        'B' : 'foo', 'C' : 'bar'},
                       index=np.arange(20))
        df.ix[0:15,:] = np.nan

        _maybe_remove(store, 'df')
        store.append('df', df[:10], dropna=True)
        store.append('df', df[10:], dropna=True)
        tm.assert_frame_equal(store['df'], df)

        _maybe_remove(store, 'df2')
        store.append('df2', df[:10], dropna=False)
        store.append('df2', df[10:], dropna=False)
        tm.assert_frame_equal(store['df2'], df)

        # nan some entire rows (but since we have dates they are still
        # written!)
        df = DataFrame({'A1' : np.random.randn(20),
                        'A2' : np.random.randn(20),
                        'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
                       index=np.arange(20))
        df.ix[0:15,:] = np.nan

        _maybe_remove(store, 'df')
        store.append('df', df[:10], dropna=True)
        store.append('df', df[10:], dropna=True)
        tm.assert_frame_equal(store['df'], df)

        _maybe_remove(store, 'df2')
        store.append('df2', df[:10], dropna=False)
        store.append('df2', df[10:], dropna=False)
        tm.assert_frame_equal(store['df2'], df)

    # Test to make sure defaults are to not drop.
    # Corresponding to Issue 9382
    df_with_missing = DataFrame({'col1':[0, np.nan, 2], 'col2':[1, np.nan, np.nan]})

    with ensure_clean_path(self.path) as path:
        df_with_missing.to_hdf(path, 'df_with_missing', format = 'table')
        reloaded = read_hdf(path, 'df_with_missing')
        tm.assert_frame_equal(df_with_missing, reloaded)

    matrix = [[[np.nan, np.nan, np.nan],[1,np.nan,np.nan]],
              [[np.nan, np.nan, np.nan], [np.nan,5,6]],
              [[np.nan, np.nan, np.nan],[np.nan,3,np.nan]]]

    panel_with_missing = Panel(matrix, items=['Item1', 'Item2','Item3'],
                               major_axis=[1,2],
                               minor_axis=['A', 'B', 'C'])

    with ensure_clean_path(self.path) as path:
        panel_with_missing.to_hdf(path, 'panel_with_missing', format='table')
        reloaded_panel = read_hdf(path, 'panel_with_missing')
        tm.assert_panel_equal(panel_with_missing, reloaded_panel)
def test_append_frame_column_oriented(self):
    """Appending a frame along the columns axis and selecting from it."""
    with ensure_clean_store(self.path) as store:

        # column oriented
        df = tm.makeTimeDataFrame()
        _maybe_remove(store, 'df1')
        store.append('df1', df.ix[:, :2], axes=['columns'])
        store.append('df1', df.ix[:, 2:])
        tm.assert_frame_equal(store['df1'], df)

        result = store.select('df1', 'columns=A')
        expected = df.reindex(columns=['A'])
        tm.assert_frame_equal(expected, result)

        # selection on the non-indexable
        result = store.select(
            'df1', ('columns=A', Term('index=df.index[0:4]')))
        expected = df.reindex(columns=['A'], index=df.index[0:4])
        tm.assert_frame_equal(expected, result)

        # this isn't supported
        self.assertRaises(TypeError, store.select, 'df1', (
            'columns=A', Term('index>df.index[4]')))
def test_append_with_different_block_ordering(self):
    """GH 4096: appending equivalent frames with differently ordered
    internal blocks must succeed; extra fields in new blocks must raise."""
    #GH 4096; using same frames, but different block orderings
    with ensure_clean_store(self.path) as store:

        for i in range(10):

            df = DataFrame(np.random.randn(10,2),columns=list('AB'))
            df['index'] = range(10)
            df['index'] += i*10
            df['int64'] = Series([1]*len(df),dtype='int64')
            df['int16'] = Series([1]*len(df),dtype='int16')

            # perturb the block layout without changing the data
            if i % 2 == 0:
                del df['int64']
                df['int64'] = Series([1]*len(df),dtype='int64')
            if i % 3 == 0:
                a = df.pop('A')
                df['A'] = a

            df.set_index('index',inplace=True)

            store.append('df',df)

    # test a different ordering but with more fields (like invalid
    # combinations)
    with ensure_clean_store(self.path) as store:

        df = DataFrame(np.random.randn(10,2),columns=list('AB'), dtype='float64')
        df['int64'] = Series([1]*len(df),dtype='int64')
        df['int16'] = Series([1]*len(df),dtype='int16')
        store.append('df',df)

        # store additional fields in different blocks
        df['int16_2'] = Series([1]*len(df),dtype='int16')
        self.assertRaises(ValueError, store.append, 'df', df)

        # store multiple additional fields in different blocks
        df['float_3'] = Series([1.]*len(df),dtype='float64')
        self.assertRaises(ValueError, store.append, 'df', df)
def test_ndim_indexables(self):
    """Test using ndim (Panel4D) tables in new ways: custom indexer axes,
    schema reuse on append, and partial selection."""
    with ensure_clean_store(self.path) as store:

        p4d = tm.makePanel4D()

        def check_indexers(key, indexers):
            # confirm each indexer column sits at the expected table position
            for i, idx in enumerate(indexers):
                self.assertTrue(getattr(getattr(
                    store.root, key).table.description, idx)._v_pos == i)

        # append then change (will take existing schema)
        indexers = ['items', 'major_axis', 'minor_axis']

        _maybe_remove(store, 'p4d')
        store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
        store.append('p4d', p4d.ix[:, :, 10:, :])
        assert_panel4d_equal(store.select('p4d'), p4d)
        check_indexers('p4d', indexers)

        # same as above, but try to append with different axes
        _maybe_remove(store, 'p4d')
        store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
        store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
            'labels', 'items', 'major_axis'])
        assert_panel4d_equal(store.select('p4d'), p4d)
        check_indexers('p4d', indexers)

        # pass incorrect number of axes
        _maybe_remove(store, 'p4d')
        self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[
            :, :, :10, :], axes=['major_axis', 'minor_axis'])

        # different than default indexables #1
        indexers = ['labels', 'major_axis', 'minor_axis']
        _maybe_remove(store, 'p4d')
        store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
        store.append('p4d', p4d.ix[:, :, 10:, :])
        assert_panel4d_equal(store['p4d'], p4d)
        check_indexers('p4d', indexers)

        # different than default indexables #2
        indexers = ['major_axis', 'labels', 'minor_axis']
        _maybe_remove(store, 'p4d')
        store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
        store.append('p4d', p4d.ix[:, :, 10:, :])
        assert_panel4d_equal(store['p4d'], p4d)
        check_indexers('p4d', indexers)

        # partial selection
        result = store.select('p4d', ['labels=l1'])
        expected = p4d.reindex(labels=['l1'])
        assert_panel4d_equal(result, expected)

        # partial selection2
        result = store.select('p4d', [Term(
            'labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
        expected = p4d.reindex(
            labels=['l1'], items=['ItemA'], minor_axis=['B'])
        assert_panel4d_equal(result, expected)

        # non-existent partial selection
        result = store.select('p4d', [Term(
            'labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
        expected = p4d.reindex(labels=['l1'], items=[], minor_axis=['B'])
        assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
    """min_itemsize handling for string columns: widening, truncation
    errors, dict vs. scalar specs, NaN handling and invalid keys."""
    with ensure_clean_store(self.path) as store:
        wp = tm.makePanel()
        wp2 = wp.rename_axis(
            dict([(x, "%s_extra" % x) for x in wp.minor_axis]), axis=2)

        def check_col(key,name,size):
            # assert the stored column has the expected string itemsize
            self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)

        store.append('s1', wp, min_itemsize=20)
        store.append('s1', wp2)
        expected = concat([wp, wp2], axis=2)
        expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
        assert_panel_equal(store['s1'], expected)
        check_col('s1', 'minor_axis', 20)

        # test dict format
        store.append('s2', wp, min_itemsize={'minor_axis': 20})
        store.append('s2', wp2)
        expected = concat([wp, wp2], axis=2)
        expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
        assert_panel_equal(store['s2'], expected)
        check_col('s2', 'minor_axis', 20)

        # apply the wrong field (similar to #1)
        store.append('s3', wp, min_itemsize={'major_axis': 20})
        self.assertRaises(ValueError, store.append, 's3', wp2)

        # test truncation of bigger strings
        store.append('s4', wp)
        self.assertRaises(ValueError, store.append, 's4', wp2)

        # avoid truncation on elements
        df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
        store.append('df_big', df)
        tm.assert_frame_equal(store.select('df_big'), df)
        check_col('df_big', 'values_block_1', 15)

        # appending smaller string ok
        df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
        store.append('df_big', df2)
        expected = concat([df, df2])
        tm.assert_frame_equal(store.select('df_big'), expected)
        check_col('df_big', 'values_block_1', 15)

        # avoid truncation on elements
        df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
        store.append('df_big2', df, min_itemsize={'values': 50})
        tm.assert_frame_equal(store.select('df_big2'), df)
        check_col('df_big2', 'values_block_1', 50)

        # bigger string on next append
        store.append('df_new', df)
        df_new = DataFrame(
            [[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
        self.assertRaises(ValueError, store.append, 'df_new', df_new)

        # with nans
        _maybe_remove(store, 'df')
        df = tm.makeTimeDataFrame()
        df['string'] = 'foo'
        df.ix[1:4, 'string'] = np.nan
        df['string2'] = 'bar'
        df.ix[4:8, 'string2'] = np.nan
        df['string3'] = 'bah'
        df.ix[1:, 'string3'] = np.nan
        store.append('df', df)
        result = store.select('df')
        tm.assert_frame_equal(result, df)

    with ensure_clean_store(self.path) as store:

        def check_col(key,name,size):
            # assert the stored column has the expected string itemsize
            self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)

        df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10))

        # a min_itemsize that creates a data_column
        _maybe_remove(store, 'df')
        store.append('df', df, min_itemsize={'A' : 200 })
        check_col('df', 'A', 200)
        self.assertEqual(store.get_storer('df').data_columns, ['A'])

        # a min_itemsize that creates a data_column2
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
        check_col('df', 'A', 200)
        self.assertEqual(store.get_storer('df').data_columns, ['B','A'])

        # a min_itemsize that creates a data_column2
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns = ['B'], min_itemsize={'values' : 200 })
        check_col('df', 'B', 200)
        check_col('df', 'values_block_0', 200)
        self.assertEqual(store.get_storer('df').data_columns, ['B'])

        # infer the .typ on subsequent appends
        _maybe_remove(store, 'df')
        store.append('df', df[:5], min_itemsize=200)
        store.append('df', df[5:], min_itemsize=200)
        tm.assert_frame_equal(store['df'], df)

        # invalid min_itemsize keys
        df = DataFrame(['foo','foo','foo','barh','barh','barh'],columns=['A'])
        _maybe_remove(store, 'df')
        self.assertRaises(ValueError, store.append, 'df', df, min_itemsize={'foo' : 20, 'foobar' : 20})
def test_append_with_data_columns(self):
    """data_columns: creation of queryable columns, string data columns,
    min_itemsize interaction, and panel data_columns (GH5717)."""
    with ensure_clean_store(self.path) as store:
        df = tm.makeTimeDataFrame()
        df.loc[:,'B'].iloc[0] = 1.
        _maybe_remove(store, 'df')
        store.append('df', df[:2], data_columns=['B'])
        store.append('df', df[2:])
        tm.assert_frame_equal(store['df'], df)

        # check that we have indices created
        assert(store._handle.root.df.table.cols.index.is_indexed is True)
        assert(store._handle.root.df.table.cols.B.is_indexed is True)

        # data column searching
        result = store.select('df', [Term('B>0')])
        expected = df[df.B > 0]
        tm.assert_frame_equal(result, expected)

        # data column searching (with an indexable and a data_columns)
        result = store.select(
            'df', [Term('B>0'), Term('index>df.index[3]')])
        df_new = df.reindex(index=df.index[4:])
        expected = df_new[df_new.B > 0]
        tm.assert_frame_equal(result, expected)

        # data column selection with a string data_column
        df_new = df.copy()
        df_new['string'] = 'foo'
        df_new.loc[1:4,'string'] = np.nan
        df_new.loc[5:6,'string'] = 'bar'
        _maybe_remove(store, 'df')
        store.append('df', df_new, data_columns=['string'])
        result = store.select('df', [Term('string=foo')])
        expected = df_new[df_new.string == 'foo']
        tm.assert_frame_equal(result, expected)

    # using min_itemsize and a data column
    def check_col(key,name,size):
        # assert the stored column has the expected string itemsize
        # (closes over whichever `store` is bound at call time below)
        self.assertEqual(getattr(store.get_storer(key).table.description,name).itemsize, size)

    with ensure_clean_store(self.path) as store:
        _maybe_remove(store, 'df')
        store.append('df', df_new, data_columns=['string'],
                     min_itemsize={'string': 30})
        check_col('df', 'string', 30)
        _maybe_remove(store, 'df')
        store.append(
            'df', df_new, data_columns=['string'], min_itemsize=30)
        check_col('df', 'string', 30)
        _maybe_remove(store, 'df')
        store.append('df', df_new, data_columns=['string'],
                     min_itemsize={'values': 30})
        check_col('df', 'string', 30)

    with ensure_clean_store(self.path) as store:
        df_new['string2'] = 'foobarbah'
        df_new['string_block1'] = 'foobarbah1'
        df_new['string_block2'] = 'foobarbah2'
        _maybe_remove(store, 'df')
        store.append('df', df_new, data_columns=['string', 'string2'], min_itemsize={'string': 30, 'string2': 40, 'values': 50})
        check_col('df', 'string', 30)
        check_col('df', 'string2', 40)
        check_col('df', 'values_block_1', 50)

    with ensure_clean_store(self.path) as store:
        # multiple data columns
        df_new = df.copy()
        df_new.ix[0,'A'] = 1.
        df_new.ix[0,'B'] = -1.
        df_new['string'] = 'foo'
        df_new.loc[1:4,'string'] = np.nan
        df_new.loc[5:6,'string'] = 'bar'
        df_new['string2'] = 'foo'
        df_new.loc[2:5,'string2'] = np.nan
        df_new.loc[7:8,'string2'] = 'bar'
        _maybe_remove(store, 'df')
        store.append(
            'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
        result = store.select('df', [Term('string=foo'), Term(
            'string2=foo'), Term('A>0'), Term('B<0')])
        expected = df_new[(df_new.string == 'foo') & (
            df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
        tm.assert_frame_equal(result, expected, check_index_type=False)

        # yield an empty frame
        result = store.select('df', [Term('string=foo'), Term(
            'string2=cool')])
        expected = df_new[(df_new.string == 'foo') & (
            df_new.string2 == 'cool')]
        tm.assert_frame_equal(result, expected, check_index_type=False)

    with ensure_clean_store(self.path) as store:
        # doc example
        df_dc = df.copy()
        df_dc['string'] = 'foo'
        df_dc.ix[4:6, 'string'] = np.nan
        df_dc.ix[7:9, 'string'] = 'bar'
        df_dc['string2'] = 'cool'
        df_dc['datetime'] = Timestamp('20010102')
        df_dc = df_dc._convert(datetime=True)
        df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan

        _maybe_remove(store, 'df_dc')
        store.append('df_dc', df_dc, data_columns=['B', 'C',
                                                   'string', 'string2', 'datetime'])
        result = store.select('df_dc', [Term('B>0')])

        expected = df_dc[df_dc.B > 0]
        tm.assert_frame_equal(result, expected, check_index_type=False)

        result = store.select(
            'df_dc', ['B > 0', 'C > 0', 'string == foo'])
        expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
            df_dc.string == 'foo')]
        tm.assert_frame_equal(result, expected, check_index_type=False)

    with ensure_clean_store(self.path) as store:
        # doc example part 2
        np.random.seed(1234)
        index = date_range('1/1/2000', periods=8)
        df_dc = DataFrame(np.random.randn(8, 3), index=index,
                          columns=['A', 'B', 'C'])
        df_dc['string'] = 'foo'
        df_dc.ix[4:6,'string'] = np.nan
        df_dc.ix[7:9,'string'] = 'bar'
        df_dc.ix[:,['B','C']] = df_dc.ix[:,['B','C']].abs()
        df_dc['string2'] = 'cool'

        # on-disk operations
        store.append('df_dc', df_dc, data_columns = ['B', 'C', 'string', 'string2'])

        result = store.select('df_dc', [ Term('B>0') ])
        expected = df_dc[df_dc.B>0]
        tm.assert_frame_equal(result,expected)

        result = store.select('df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
        expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]
        tm.assert_frame_equal(result,expected)

    with ensure_clean_store(self.path) as store:
        # panel
        # GH5717 not handling data_columns
        np.random.seed(1234)
        p = tm.makePanel()

        store.append('p1',p)
        tm.assert_panel_equal(store.select('p1'),p)

        store.append('p2',p,data_columns=True)
        tm.assert_panel_equal(store.select('p2'),p)

        result = store.select('p2',where='ItemA>0')
        expected = p.to_frame()
        expected = expected[expected['ItemA']>0]
        tm.assert_frame_equal(result.to_frame(),expected)

        result = store.select('p2',where='ItemA>0 & minor_axis=["A","B"]')
        expected = p.to_frame()
        expected = expected[expected['ItemA']>0]
        expected = expected[expected.reset_index(level=['major']).index.isin(['A','B'])]
        tm.assert_frame_equal(result.to_frame(),expected)
def test_create_table_index(self):
    """create_table_index: index=True/False, optlevel/kind options, data
    columns, and failure on a non-table (fixed) node."""
    with ensure_clean_store(self.path) as store:

        def col(t,column):
            # fetch a PyTables column object for indexing inspection
            return getattr(store.get_storer(t).table.cols,column)

        # index=False
        wp = tm.makePanel()
        store.append('p5', wp, index=False)
        store.create_table_index('p5', columns=['major_axis'])
        assert(col('p5', 'major_axis').is_indexed is True)
        assert(col('p5', 'minor_axis').is_indexed is False)

        # index=True
        store.append('p5i', wp, index=True)
        assert(col('p5i', 'major_axis').is_indexed is True)
        assert(col('p5i', 'minor_axis').is_indexed is True)

        # default optlevels
        store.get_storer('p5').create_index()
        assert(col('p5', 'major_axis').index.optlevel == 6)
        assert(col('p5', 'minor_axis').index.kind == 'medium')

        # let's change the indexing scheme
        store.create_table_index('p5')
        assert(col('p5', 'major_axis').index.optlevel == 6)
        assert(col('p5', 'minor_axis').index.kind == 'medium')
        store.create_table_index('p5', optlevel=9)
        assert(col('p5', 'major_axis').index.optlevel == 9)
        assert(col('p5', 'minor_axis').index.kind == 'medium')
        store.create_table_index('p5', kind='full')
        assert(col('p5', 'major_axis').index.optlevel == 9)
        assert(col('p5', 'minor_axis').index.kind == 'full')
        store.create_table_index('p5', optlevel=1, kind='light')
        assert(col('p5', 'major_axis').index.optlevel == 1)
        assert(col('p5', 'minor_axis').index.kind == 'light')

        # data columns
        df = tm.makeTimeDataFrame()
        df['string'] = 'foo'
        df['string2'] = 'bar'
        store.append('f', df, data_columns=['string', 'string2'])
        assert(col('f', 'index').is_indexed is True)
        assert(col('f', 'string').is_indexed is True)
        assert(col('f', 'string2').is_indexed is True)

        # specify index=columns
        store.append(
            'f2', df, index=['string'], data_columns=['string', 'string2'])
        assert(col('f2', 'index').is_indexed is False)
        assert(col('f2', 'string').is_indexed is True)
        assert(col('f2', 'string2').is_indexed is False)

        # try to index a non-table
        _maybe_remove(store, 'f2')
        store.put('f2', df)
        self.assertRaises(TypeError, store.create_table_index, 'f2')
def test_append_diff_item_order(self):
    """Appending a panel whose items are ordered differently must raise."""
    panel = tm.makePanel()
    first_slice = panel.ix[:, :10, :]
    reordered_rest = panel.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
    with ensure_clean_store(self.path) as store:
        store.put('panel', first_slice, format='table')
        self.assertRaises(ValueError, store.put, 'panel',
                          reordered_rest, append=True)
def test_append_hierarchical(self):
    """MultiIndex frames append/select correctly, incl. column subsetting
    via select and read_hdf (GH 3748)."""
    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                               ['one', 'two', 'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['foo', 'bar'])
    df = DataFrame(np.random.randn(10, 3), index=index,
                   columns=['A', 'B', 'C'])
    with ensure_clean_store(self.path) as store:
        store.append('mi', df)
        result = store.select('mi')
        tm.assert_frame_equal(result, df)
        # GH 3748
        result = store.select('mi',columns=['A','B'])
        expected = df.reindex(columns=['A','B'])
        tm.assert_frame_equal(result,expected)
    with ensure_clean_path('test.hdf') as path:
        df.to_hdf(path,'df',format='table')
        result = read_hdf(path,'df',columns=['A','B'])
        expected = df.reindex(columns=['A','B'])
        tm.assert_frame_equal(result,expected)
def test_column_multiindex(self):
    """Column MultiIndexes round-trip (GH 4710); data_columns with a
    column MultiIndex raise; appending works (GH 6167); named column
    Index is preserved."""
    # GH 4710
    # recreate multi-indexes properly
    index = MultiIndex.from_tuples([('A','a'), ('A','b'), ('B','a'), ('B','b')], names=['first','second'])
    df = DataFrame(np.arange(12).reshape(3,4), columns=index)
    with ensure_clean_store(self.path) as store:
        store.put('df',df)
        tm.assert_frame_equal(store['df'],df,check_index_type=True,check_column_type=True)
        store.put('df1',df,format='table')
        tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
        # data_columns cannot be combined with a column MultiIndex
        self.assertRaises(ValueError, store.put, 'df2',df,format='table',data_columns=['A'])
        self.assertRaises(ValueError, store.put, 'df3',df,format='table',data_columns=True)
    # appending multi-column on existing table (see GH 6167)
    with ensure_clean_store(self.path) as store:
        store.append('df2', df)
        store.append('df2', df)
        tm.assert_frame_equal(store['df2'], concat((df,df)))
    # non_index_axes name
    df = DataFrame(np.arange(12).reshape(3,4), columns=Index(list('ABCD'),name='foo'))
    with ensure_clean_store(self.path) as store:
        store.put('df1',df,format='table')
        tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
def test_store_multiindex(self):
    """MultiIndex level names round-trip (GH 5527): missing names are
    filled for Series, and duplicate names raise."""
    # validate multi-index names
    # GH 5527
    with ensure_clean_store(self.path) as store:
        def make_index(names=None):
            # 12-row (date, s, t) product index with optional level names
            return MultiIndex.from_tuples([( datetime.datetime(2013,12,d), s, t) for d in range(1,3) for s in range(2) for t in range(3)],
                                          names=names)
        # no names
        _maybe_remove(store, 'df')
        df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index())
        store.append('df',df)
        tm.assert_frame_equal(store.select('df'),df)
        # partial names
        _maybe_remove(store, 'df')
        df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date',None,None]))
        store.append('df',df)
        tm.assert_frame_equal(store.select('df'),df)
        # series
        _maybe_remove(store, 's')
        s = Series(np.zeros(12), index=make_index(['date', None, None]))
        store.append('s',s)
        # unnamed levels come back with generated level_N names
        xp = Series(np.zeros(12), index=make_index(['date', 'level_1', 'level_2']))
        tm.assert_series_equal(store.select('s'), xp)
        # dup with column
        _maybe_remove(store, 'df')
        df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','a','t']))
        self.assertRaises(ValueError, store.append, 'df',df)
        # dup within level
        _maybe_remove(store, 'df')
        df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','date','date']))
        self.assertRaises(ValueError, store.append, 'df',df)
        # fully names
        _maybe_remove(store, 'df')
        df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','s','t']))
        store.append('df',df)
        tm.assert_frame_equal(store.select('df'),df)
def test_select_columns_in_where(self):
    """MultiIndexes are recreated when columns come via `where` (GH 6169)."""
    # GH 6169
    # recreate multi-indexes when columns is passed
    # in the `where` argument
    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                               ['one', 'two', 'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['foo_name', 'bar_name'])
    # With a DataFrame
    df = DataFrame(np.random.randn(10, 3), index=index,
                   columns=['A', 'B', 'C'])
    with ensure_clean_store(self.path) as store:
        store.put('df', df, format='table')
        expected = df[['A']]
        tm.assert_frame_equal(store.select('df', columns=['A']), expected)
        tm.assert_frame_equal(store.select('df', where="columns=['A']"), expected)
    # With a Series
    s = Series(np.random.randn(10), index=index,
               name='A')
    with ensure_clean_store(self.path) as store:
        store.put('s', s, format='table')
        tm.assert_series_equal(store.select('s', where="columns=['A']"),s)
def test_pass_spec_to_storer(self):
    """Fixed-format storers reject table-only select keywords."""
    frame = tm.makeDataFrame()
    with ensure_clean_store(self.path) as store:
        store.put('df', frame)
        for kwargs in (dict(columns=['A']), dict(where=[('columns=A')])):
            self.assertRaises(TypeError, store.select, 'df', **kwargs)
def test_append_misc(self):
    """Misc append behavior: unsupported types raise, chunksize /
    expectedrows work, and empty-object appends are no-ops (GH 4273)."""
    with ensure_clean_store(self.path) as store:
        # unsuported data types for non-tables
        p4d = tm.makePanel4D()
        self.assertRaises(TypeError, store.put,'p4d',p4d)
        # unsuported data types
        self.assertRaises(TypeError, store.put,'abc',None)
        self.assertRaises(TypeError, store.put,'abc','123')
        self.assertRaises(TypeError, store.put,'abc',123)
        self.assertRaises(TypeError, store.put,'abc',np.arange(5))
        df = tm.makeDataFrame()
        store.append('df', df, chunksize=1)
        result = store.select('df')
        tm.assert_frame_equal(result, df)
        store.append('df1', df, expectedrows=10)
        result = store.select('df1')
        tm.assert_frame_equal(result, df)
    # more chunksize in append tests
    def check(obj, comparator):
        # round-trip obj through append at several chunk sizes
        for c in [10, 200, 1000]:
            with ensure_clean_store(self.path,mode='w') as store:
                store.append('obj', obj, chunksize=c)
                result = store.select('obj')
                comparator(result,obj)
    df = tm.makeDataFrame()
    df['string'] = 'foo'
    df['float322'] = 1.
    df['float322'] = df['float322'].astype('float32')
    df['bool'] = df['float322'] > 0
    df['time1'] = Timestamp('20130101')
    df['time2'] = Timestamp('20130102')
    check(df, tm.assert_frame_equal)
    p = tm.makePanel()
    check(p, assert_panel_equal)
    p4d = tm.makePanel4D()
    check(p4d, assert_panel4d_equal)
    # empty frame, GH4273
    with ensure_clean_store(self.path) as store:
        # 0 len
        df_empty = DataFrame(columns=list('ABC'))
        store.append('df',df_empty)
        self.assertRaises(KeyError,store.select, 'df')
        # repeated append of 0/non-zero frames
        df = DataFrame(np.random.rand(10,3),columns=list('ABC'))
        store.append('df',df)
        assert_frame_equal(store.select('df'),df)
        store.append('df',df_empty)
        assert_frame_equal(store.select('df'),df)
        # store
        df = DataFrame(columns=list('ABC'))
        store.put('df2',df)
        assert_frame_equal(store.select('df2'),df)
        # 0 len
        p_empty = Panel(items=list('ABC'))
        store.append('p',p_empty)
        self.assertRaises(KeyError,store.select, 'p')
        # repeated append of 0/non-zero frames
        p = Panel(np.random.randn(3,4,5),items=list('ABC'))
        store.append('p',p)
        assert_panel_equal(store.select('p'),p)
        store.append('p',p_empty)
        assert_panel_equal(store.select('p'),p)
        # store
        store.put('p2',p_empty)
        assert_panel_equal(store.select('p2'),p_empty)
def test_append_raise(self):
    """Invalid inputs to append raise clear errors: list-valued columns,
    object datetimes with NaNs, raw ndarrays/Series, schema mismatch."""
    with ensure_clean_store(self.path) as store:
        # test append with invalid input to get good error messages
        # list in column
        df = tm.makeDataFrame()
        df['invalid'] = [['a']] * len(df)
        self.assertEqual(df.dtypes['invalid'], np.object_)
        self.assertRaises(TypeError, store.append,'df',df)
        # multiple invalid columns
        df['invalid2'] = [['a']] * len(df)
        df['invalid3'] = [['a']] * len(df)
        self.assertRaises(TypeError, store.append,'df',df)
        # datetime with embedded nans as object
        df = tm.makeDataFrame()
        s = Series(datetime.datetime(2001,1,2),index=df.index)
        s = s.astype(object)
        s[0:5] = np.nan
        df['invalid'] = s
        self.assertEqual(df.dtypes['invalid'], np.object_)
        self.assertRaises(TypeError, store.append,'df', df)
        # directy ndarray
        self.assertRaises(TypeError, store.append,'df',np.arange(10))
        # series directly
        self.assertRaises(TypeError, store.append,'df',Series(np.arange(10)))
        # appending an incompatbile table
        df = tm.makeDataFrame()
        store.append('df',df)
        df['foo'] = 'foo'
        self.assertRaises(ValueError, store.append,'df',df)
def test_table_index_incompatible_dtypes(self):
    """Appending a frame whose index dtype differs must raise TypeError."""
    int_indexed = DataFrame({'a': [1, 2, 3]})
    date_indexed = DataFrame({'a': [4, 5, 6]},
                             index=date_range('1/1/2000', periods=3))
    with ensure_clean_store(self.path) as store:
        store.put('frame', int_indexed, format='table')
        self.assertRaises(TypeError, store.put, 'frame',
                          date_indexed, format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
    """Value dtypes (f8/i8/f4 and mixed) survive a table round-trip;
    appending an incompatible dtype raises."""
    with ensure_clean_store(self.path) as store:
        df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
        store.append('df_f8', df1)
        assert_series_equal(df1.dtypes,store['df_f8'].dtypes)
        df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
        store.append('df_i8', df2)
        assert_series_equal(df2.dtypes,store['df_i8'].dtypes)
        # incompatible dtype
        self.assertRaises(ValueError, store.append, 'df_i8', df1)
        # check creation/storage/retrieval of float32 (a bit hacky to actually create them thought)
        df1 = DataFrame(np.array([[1],[2],[3]],dtype='f4'),columns = ['A'])
        store.append('df_f4', df1)
        assert_series_equal(df1.dtypes,store['df_f4'].dtypes)
        assert df1.dtypes[0] == 'float32'
        # check with mixed dtypes
        df1 = DataFrame(dict([ (c,Series(np.random.randn(5),dtype=c)) for c in
                               ['float32','float64','int32','int64','int16','int8'] ]))
        df1['string'] = 'foo'
        df1['float322'] = 1.
        df1['float322'] = df1['float322'].astype('float32')
        df1['bool'] = df1['float32'] > 0
        df1['time1'] = Timestamp('20130101')
        df1['time2'] = Timestamp('20130102')
        store.append('df_mixed_dtypes1', df1)
        result = store.select('df_mixed_dtypes1').get_dtype_counts()
        expected = Series({ 'float32' : 2, 'float64' : 1,'int32' : 1, 'bool' : 1,
                            'int16' : 1, 'int8' : 1, 'int64' : 1, 'object' : 1,
                            'datetime64[ns]' : 2})
        # sort both so the comparison is order-insensitive
        result.sort()
        expected.sort()
        tm.assert_series_equal(result,expected)
def test_table_mixed_dtypes(self):
    """Mixed-dtype DataFrame, Panel and Panel4D append+select round-trips."""
    # frame
    df = tm.makeDataFrame()
    df['obj1'] = 'foo'
    df['obj2'] = 'bar'
    df['bool1'] = df['A'] > 0
    df['bool2'] = df['B'] > 0
    df['bool3'] = True
    df['int1'] = 1
    df['int2'] = 2
    df['timestamp1'] = Timestamp('20010102')
    df['timestamp2'] = Timestamp('20010103')
    df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
    df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
    df.ix[3:6, ['obj1']] = np.nan
    df = df.consolidate()._convert(datetime=True)
    with ensure_clean_store(self.path) as store:
        store.append('df1_mixed', df)
        tm.assert_frame_equal(store.select('df1_mixed'), df)
    # panel
    wp = tm.makePanel()
    wp['obj1'] = 'foo'
    wp['obj2'] = 'bar'
    wp['bool1'] = wp['ItemA'] > 0
    wp['bool2'] = wp['ItemB'] > 0
    wp['int1'] = 1
    wp['int2'] = 2
    wp = wp.consolidate()
    with ensure_clean_store(self.path) as store:
        store.append('p1_mixed', wp)
        assert_panel_equal(store.select('p1_mixed'), wp)
    # ndim
    wp = tm.makePanel4D()
    wp['obj1'] = 'foo'
    wp['obj2'] = 'bar'
    wp['bool1'] = wp['l1'] > 0
    wp['bool2'] = wp['l2'] > 0
    wp['int1'] = 1
    wp['int2'] = 2
    wp = wp.consolidate()
    with ensure_clean_store(self.path) as store:
        store.append('p4d_mixed', wp)
        assert_panel4d_equal(store.select('p4d_mixed'), wp)
def test_unimplemented_dtypes_table_columns(self):
    """Unsupported column dtypes (datetime.date, py2 unicode) raise
    TypeError on append, alone or inside a mixed object block."""
    with ensure_clean_store(self.path) as store:
        l = [('date', datetime.date(2001, 1, 2))]
        # py3 ok for unicode
        if not compat.PY3:
            l.append(('unicode', u('\\u03c3')))
        ### currently not supported dtypes ####
        for n, f in l:
            df = tm.makeDataFrame()
            df[n] = f
            self.assertRaises(
                TypeError, store.append, 'df1_%s' % n, df)
    # frame
    df = tm.makeDataFrame()
    df['obj1'] = 'foo'
    df['obj2'] = 'bar'
    df['datetime1'] = datetime.date(2001, 1, 2)
    df = df.consolidate()._convert(datetime=True)
    with ensure_clean_store(self.path) as store:
        # this fails because we have a date in the object block......
        self.assertRaises(TypeError, store.append, 'df_unimplemented', df)
def test_calendar_roundtrip_issue(self):
    """A series built on a CustomBusinessDay calendar round-trips in both
    fixed and table formats (GH 8591)."""
    # 8591
    # doc example from tseries holiday section
    weekmask_egypt = 'Sun Mon Tue Wed Thu'
    holidays = ['2012-05-01', datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
    bday_egypt = pandas.offsets.CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
    dt = datetime.datetime(2013, 4, 30)
    dts = date_range(dt, periods=5, freq=bday_egypt)
    s = (Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
    with ensure_clean_store(self.path) as store:
        store.put('fixed',s)
        result = store.select('fixed')
        assert_series_equal(result, s)
        store.append('table',s)
        result = store.select('table')
        assert_series_equal(result, s)
def test_append_with_timedelta(self):
    """Timedelta columns append and can be queried with numeric and
    string ('-3D', '-500000s') terms (GH 3577)."""
    # GH 3577
    # append timedelta
    from datetime import timedelta
    df = DataFrame(dict(A = Timestamp('20130101'), B = [ Timestamp('20130101') + timedelta(days=i,seconds=10) for i in range(10) ]))
    df['C'] = df['A']-df['B']
    df.ix[3:5,'C'] = np.nan
    with ensure_clean_store(self.path) as store:
        # table
        _maybe_remove(store, 'df')
        store.append('df',df,data_columns=True)
        result = store.select('df')
        assert_frame_equal(result,df)
        result = store.select('df',Term("C<100000"))
        assert_frame_equal(result,df)
        result = store.select('df',Term("C","<",-3*86400))
        assert_frame_equal(result,df.iloc[3:])
        result = store.select('df',"C<'-3D'")
        assert_frame_equal(result,df.iloc[3:])
        # a bit hacky here as we don't really deal with the NaT properly
        result = store.select('df',"C<'-500000s'")
        result = result.dropna(subset=['C'])
        assert_frame_equal(result,df.iloc[6:])
        result = store.select('df',"C<'-3.5D'")
        result = result.iloc[1:]
        assert_frame_equal(result,df.iloc[4:])
        # fixed
        _maybe_remove(store, 'df2')
        store.put('df2',df)
        result = store.select('df2')
        assert_frame_equal(result,df)
def test_remove(self):
    """remove() and __delitem__ drop keys; nonexistent keys raise
    KeyError; path-like keys ('b/foo') remove only their subtree."""
    with ensure_clean_store(self.path) as store:
        ts = tm.makeTimeSeries()
        df = tm.makeDataFrame()
        store['a'] = ts
        store['b'] = df
        _maybe_remove(store, 'a')
        self.assertEqual(len(store), 1)
        tm.assert_frame_equal(df, store['b'])
        _maybe_remove(store, 'b')
        self.assertEqual(len(store), 0)
        # nonexistence
        self.assertRaises(KeyError, store.remove, 'a_nonexistent_store')
        # pathing
        store['a'] = ts
        store['b/foo'] = df
        _maybe_remove(store, 'foo')
        _maybe_remove(store, 'b/foo')
        self.assertEqual(len(store), 1)
        store['a'] = ts
        store['b/foo'] = df
        _maybe_remove(store, 'b')
        self.assertEqual(len(store), 1)
        # __delitem__
        store['a'] = ts
        store['b'] = df
        del store['a']
        del store['b']
        self.assertEqual(len(store), 0)
def test_remove_where(self):
    """remove() with a where clause: missing keys raise KeyError, empty
    where deletes all rows, invalid criteria raise ValueError."""
    with ensure_clean_store(self.path) as store:
        # non-existance
        crit1 = Term('index>foo')
        self.assertRaises(KeyError, store.remove, 'a', [crit1])
        # try to remove non-table (with crit)
        # non-table ok (where = None)
        wp = tm.makePanel(30)
        store.put('wp', wp, format='table')
        store.remove('wp', ["minor_axis=['A', 'D']"])
        rs = store.select('wp')
        expected = wp.reindex(minor_axis=['B', 'C'])
        assert_panel_equal(rs, expected)
        # empty where
        _maybe_remove(store, 'wp')
        store.put('wp', wp, format='table')
        # deleted number (entire table)
        n = store.remove('wp', [])
        self.assertTrue(n == 120)
        # non - empty where
        _maybe_remove(store, 'wp')
        store.put('wp', wp, format='table')
        self.assertRaises(ValueError, store.remove,
                          'wp', ['foo'])
        # selectin non-table with a where
        # store.put('wp2', wp, format='f')
        # self.assertRaises(ValueError, store.remove,
        #                   'wp2', [('column', ['A', 'D'])])
def test_remove_startstop(self):
    """remove() honors start/stop row bounds, incl. negative offsets and
    combination with a where clause (GH 4835, GH 6177)."""
    # GH #4835 and #6177
    with ensure_clean_store(self.path) as store:
        wp = tm.makePanel(30)
        # start
        _maybe_remove(store, 'wp1')
        store.put('wp1', wp, format='t')
        n = store.remove('wp1', start=32)
        self.assertTrue(n == 120-32)
        result = store.select('wp1')
        # 4 minor-axis values per major-axis entry, hence the //4
        expected = wp.reindex(major_axis=wp.major_axis[:32//4])
        assert_panel_equal(result, expected)
        _maybe_remove(store, 'wp2')
        store.put('wp2', wp, format='t')
        n = store.remove('wp2', start=-32)
        self.assertTrue(n == 32)
        result = store.select('wp2')
        expected = wp.reindex(major_axis=wp.major_axis[:-32//4])
        assert_panel_equal(result, expected)
        # stop
        _maybe_remove(store, 'wp3')
        store.put('wp3', wp, format='t')
        n = store.remove('wp3', stop=32)
        self.assertTrue(n == 32)
        result = store.select('wp3')
        expected = wp.reindex(major_axis=wp.major_axis[32//4:])
        assert_panel_equal(result, expected)
        _maybe_remove(store, 'wp4')
        store.put('wp4', wp, format='t')
        n = store.remove('wp4', stop=-32)
        self.assertTrue(n == 120-32)
        result = store.select('wp4')
        expected = wp.reindex(major_axis=wp.major_axis[-32//4:])
        assert_panel_equal(result, expected)
        # start n stop
        _maybe_remove(store, 'wp5')
        store.put('wp5', wp, format='t')
        n = store.remove('wp5', start=16, stop=-16)
        self.assertTrue(n == 120-32)
        result = store.select('wp5')
        expected = wp.reindex(major_axis=wp.major_axis[:16//4].union(wp.major_axis[-16//4:]))
        assert_panel_equal(result, expected)
        _maybe_remove(store, 'wp6')
        store.put('wp6', wp, format='t')
        n = store.remove('wp6', start=16, stop=16)
        self.assertTrue(n == 0)
        result = store.select('wp6')
        expected = wp.reindex(major_axis=wp.major_axis)
        assert_panel_equal(result, expected)
        # with where
        _maybe_remove(store, 'wp7')
        date = wp.major_axis.take(np.arange(0,30,3))
        crit = Term('major_axis=date')
        store.put('wp7', wp, format='t')
        n = store.remove('wp7', where=[crit], stop=80)
        self.assertTrue(n == 28)
        result = store.select('wp7')
        expected = wp.reindex(major_axis=wp.major_axis.difference(wp.major_axis[np.arange(0,20,3)]))
        assert_panel_equal(result, expected)
def test_remove_crit(self):
    """remove() with criteria: group removal, chained removals, single
    dates, lists of dates, and an out-of-range corner case."""
    with ensure_clean_store(self.path) as store:
        wp = tm.makePanel(30)
        # group row removal
        _maybe_remove(store, 'wp3')
        date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
        crit4 = Term('major_axis=date4')
        store.put('wp3', wp, format='t')
        n = store.remove('wp3', where=[crit4])
        self.assertTrue(n == 36)
        result = store.select('wp3')
        expected = wp.reindex(major_axis=wp.major_axis.difference(date4))
        assert_panel_equal(result, expected)
        # upper half
        _maybe_remove(store, 'wp')
        store.put('wp', wp, format='table')
        date = wp.major_axis[len(wp.major_axis) // 2]
        crit1 = Term('major_axis>date')
        crit2 = Term("minor_axis=['A', 'D']")
        n = store.remove('wp', where=[crit1])
        self.assertTrue(n == 56)
        n = store.remove('wp', where=[crit2])
        self.assertTrue(n == 32)
        result = store['wp']
        expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
        assert_panel_equal(result, expected)
        # individual row elements
        _maybe_remove(store, 'wp2')
        store.put('wp2', wp, format='table')
        date1 = wp.major_axis[1:3]
        crit1 = Term('major_axis=date1')
        store.remove('wp2', where=[crit1])
        result = store.select('wp2')
        expected = wp.reindex(major_axis=wp.major_axis.difference(date1))
        assert_panel_equal(result, expected)
        date2 = wp.major_axis[5]
        crit2 = Term('major_axis=date2')
        store.remove('wp2', where=[crit2])
        result = store['wp2']
        expected = wp.reindex(
            major_axis=wp.major_axis.difference(date1).difference(Index([date2])))
        assert_panel_equal(result, expected)
        date3 = [wp.major_axis[7], wp.major_axis[9]]
        crit3 = Term('major_axis=date3')
        store.remove('wp2', where=[crit3])
        result = store['wp2']
        expected = wp.reindex(
            major_axis=wp.major_axis.difference(date1).difference(Index([date2])).difference(Index(date3)))
        assert_panel_equal(result, expected)
        # corners
        _maybe_remove(store, 'wp4')
        store.put('wp4', wp, format='table')
        n = store.remove(
            'wp4', where=[Term('major_axis>wp.major_axis[-1]')])
        result = store.select('wp4')
        assert_panel_equal(result, wp)
def test_invalid_terms(self):
    """Malformed or out-of-scope query terms raise the documented
    Value/Type/Syntax errors on select and read_hdf."""
    with ensure_clean_store(self.path) as store:
        df = tm.makeTimeDataFrame()
        df['string'] = 'foo'
        df.ix[0:4,'string'] = 'bar'
        wp = tm.makePanel()
        p4d = tm.makePanel4D()
        store.put('df', df, format='table')
        store.put('wp', wp, format='table')
        store.put('p4d', p4d, format='table')
        # some invalid terms
        self.assertRaises(ValueError, store.select, 'wp', "minor=['A', 'B']")
        self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114']"])
        self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114', '20121114']"])
        self.assertRaises(TypeError, Term)
        # more invalid
        self.assertRaises(ValueError, store.select, 'df','df.index[3]')
        self.assertRaises(SyntaxError, store.select, 'df','index>')
        self.assertRaises(ValueError, store.select, 'wp', "major_axis<'20000108' & minor_axis['A', 'B']")
    # from the docs
    with ensure_clean_path(self.path) as path:
        dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
        dfq.to_hdf(path,'dfq',format='table',data_columns=True)
        # check ok
        read_hdf(path,'dfq',where="index>Timestamp('20130104') & columns=['A', 'B']")
        read_hdf(path,'dfq',where="A>0 or C>0")
    # catch the invalid reference
    with ensure_clean_path(self.path) as path:
        dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
        dfq.to_hdf(path,'dfq',format='table')
        # A/C are not data_columns here, so referencing them must fail
        self.assertRaises(ValueError, read_hdf, path,'dfq',where="A>0 or C>0")
def test_terms(self):
    """Term syntax coverage: panel/p4d selection, deprecated tuple form,
    many valid string forms, lambdas rejected, USub/UAdd handling."""
    with ensure_clean_store(self.path) as store:
        wp = tm.makePanel()
        p4d = tm.makePanel4D()
        wpneg = Panel.fromDict({-1: tm.makeDataFrame(), 0: tm.makeDataFrame(),
                                1: tm.makeDataFrame()})
        store.put('wp', wp, format='table')
        store.put('p4d', p4d, format='table')
        store.put('wpneg', wpneg, format='table')
        # panel
        result = store.select('wp', [Term(
            'major_axis<"20000108"'), Term("minor_axis=['A', 'B']")])
        expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
        assert_panel_equal(result, expected)
        # with deprecation
        result = store.select('wp', [Term(
            'major_axis','<',"20000108"), Term("minor_axis=['A', 'B']")])
        expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
        tm.assert_panel_equal(result, expected)
        # p4d
        result = store.select('p4d', [Term('major_axis<"20000108"'),
                                      Term("minor_axis=['A', 'B']"),
                                      Term("items=['ItemA', 'ItemB']")])
        expected = p4d.truncate(after='20000108').reindex(
            minor=['A', 'B'], items=['ItemA', 'ItemB'])
        assert_panel4d_equal(result, expected)
        # back compat invalid terms
        terms = [
            dict(field='major_axis', op='>', value='20121114'),
            [ dict(field='major_axis', op='>', value='20121114') ],
            [ "minor_axis=['A','B']", dict(field='major_axis', op='>', value='20121114') ]
            ]
        for t in terms:
            with tm.assert_produces_warning(expected_warning=DeprecationWarning,
                                            check_stacklevel=False):
                Term(t)
        # valid terms
        terms = [
            ('major_axis=20121114'),
            ('major_axis>20121114'),
            (("major_axis=['20121114', '20121114']"),),
            ('major_axis=datetime.datetime(2012, 11, 14)'),
            'major_axis> 20121114',
            'major_axis >20121114',
            'major_axis > 20121114',
            (("minor_axis=['A', 'B']"),),
            (("minor_axis=['A', 'B']"),),
            ((("minor_axis==['A', 'B']"),),),
            (("items=['ItemA', 'ItemB']"),),
            ('items=ItemA'),
            ]
        for t in terms:
            store.select('wp', t)
            store.select('p4d', t)
        # valid for p4d only
        terms = [
            (("labels=['l1', 'l2']"),),
            Term("labels=['l1', 'l2']"),
            ]
        for t in terms:
            store.select('p4d', t)
        with tm.assertRaisesRegexp(TypeError, 'Only named functions are supported'):
            store.select('wp', Term('major_axis == (lambda x: x)("20130101")'))
        # check USub node parsing
        res = store.select('wpneg', Term('items == -1'))
        expected = Panel({-1: wpneg[-1]})
        tm.assert_panel_equal(res, expected)
        with tm.assertRaisesRegexp(NotImplementedError,
                                   'Unary addition not supported'):
            store.select('wpneg', Term('items == +1'))
def test_term_compat(self):
    """Back-compat (field, op, value) Term forms select and remove
    correctly, incl. datetime and list-valued comparisons."""
    with ensure_clean_store(self.path) as store:
        wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
                   major_axis=date_range('1/1/2000', periods=5),
                   minor_axis=['A', 'B', 'C', 'D'])
        store.append('wp',wp)
        result = store.select('wp', [Term('major_axis>20000102'),
                                     Term('minor_axis', '=', ['A','B']) ])
        expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
        assert_panel_equal(result, expected)
        store.remove('wp', Term('major_axis>20000103'))
        result = store.select('wp')
        expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
        assert_panel_equal(result, expected)
    with ensure_clean_store(self.path) as store:
        wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
                   major_axis=date_range('1/1/2000', periods=5),
                   minor_axis=['A', 'B', 'C', 'D'])
        store.append('wp',wp)
        # stringified datetimes
        result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2))])
        expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
        assert_panel_equal(result, expected)
        result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2,0,0))])
        expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
        assert_panel_equal(result, expected)
        result = store.select('wp', [Term('major_axis','=',[datetime.datetime(2000,1,2,0,0),datetime.datetime(2000,1,3,0,0)])])
        expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
        assert_panel_equal(result, expected)
        result = store.select('wp', [Term('minor_axis','=',['A','B'])])
        expected = wp.loc[:,:,['A','B']]
        assert_panel_equal(result, expected)
def test_backwards_compat_without_term_object(self):
    """Raw tuple criteria (no Term object) still work but emit a
    DeprecationWarning for select and remove."""
    with ensure_clean_store(self.path) as store:
        wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
                   major_axis=date_range('1/1/2000', periods=5),
                   minor_axis=['A', 'B', 'C', 'D'])
        store.append('wp',wp)
        with tm.assert_produces_warning(expected_warning=DeprecationWarning,
                                        check_stacklevel=not compat.PY3):
            result = store.select('wp', [('major_axis>20000102'),
                                         ('minor_axis', '=', ['A','B']) ])
        expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
        assert_panel_equal(result, expected)
        store.remove('wp', ('major_axis>20000103'))
        result = store.select('wp')
        expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
        assert_panel_equal(result, expected)
    with ensure_clean_store(self.path) as store:
        wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
                   major_axis=date_range('1/1/2000', periods=5),
                   minor_axis=['A', 'B', 'C', 'D'])
        store.append('wp',wp)
        # stringified datetimes
        with tm.assert_produces_warning(expected_warning=DeprecationWarning,
                                        check_stacklevel=not compat.PY3):
            result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2))])
        expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
        assert_panel_equal(result, expected)
        with tm.assert_produces_warning(expected_warning=DeprecationWarning,
                                        check_stacklevel=not compat.PY3):
            result = store.select('wp', [('major_axis','>',datetime.datetime(2000,1,2,0,0))])
        expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
        assert_panel_equal(result, expected)
        with tm.assert_produces_warning(expected_warning=DeprecationWarning,
                                        check_stacklevel=not compat.PY3):
            result = store.select('wp', [('major_axis','=',[datetime.datetime(2000,1,2,0,0),
                                                            datetime.datetime(2000,1,3,0,0)])])
        expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
        assert_panel_equal(result, expected)
        with tm.assert_produces_warning(expected_warning=DeprecationWarning,
                                        check_stacklevel=not compat.PY3):
            result = store.select('wp', [('minor_axis','=',['A','B'])])
        expected = wp.loc[:,:,['A','B']]
        assert_panel_equal(result, expected)
def test_same_name_scoping(self):
    """Query-string name resolution: `datetime` resolves whether it is
    the module or (after a shadowing import) the class."""
    with ensure_clean_store(self.path) as store:
        import pandas as pd
        df = DataFrame(np.random.randn(20, 2),index=pd.date_range('20130101',periods=20))
        store.put('df', df, format='table')
        expected = df[df.index>pd.Timestamp('20130105')]
        import datetime
        result = store.select('df','index>datetime.datetime(2013,1,5)')
        assert_frame_equal(result,expected)
        from datetime import datetime
        # technically an error, but allow it
        result = store.select('df','index>datetime.datetime(2013,1,5)')
        assert_frame_equal(result,expected)
        result = store.select('df','index>datetime(2013,1,5)')
        assert_frame_equal(result,expected)
def test_series(self):
    """Round-trip string, time-indexed and object-indexed Series."""
    string_series = tm.makeStringSeries()
    self._check_roundtrip(string_series, tm.assert_series_equal)

    time_series = tm.makeTimeSeries()
    self._check_roundtrip(time_series, tm.assert_series_equal)

    # datetime values under an object-dtype index
    obj_indexed = Series(time_series.index,
                         Index(time_series.index, dtype=object))
    self._check_roundtrip(obj_indexed, tm.assert_series_equal)

    # fully object-cast index; index dtype may differ after reload
    obj_index = Index(np.asarray(time_series.index, dtype=object),
                      dtype=object)
    self._check_roundtrip(Series(time_series.values, obj_index),
                          tm.assert_series_equal, check_index_type=False)
def test_sparse_series(self):
    """Round-trip sparse Series: block kind, integer kind, fill_value=0."""
    dense = tm.makeStringSeries()
    dense[3:5] = np.nan
    for sparse in (dense.to_sparse(),
                   dense.to_sparse(kind='integer'),
                   dense.to_sparse(fill_value=0)):
        self._check_roundtrip(sparse, tm.assert_series_equal,
                              check_series_type=True)
def test_sparse_frame(self):
    """Double round-trip sparse frames: block kind, integer kind, fill_value=0."""
    dense = tm.makeDataFrame()
    dense.ix[3:5, 1:3] = np.nan
    dense.ix[8:10, -2] = np.nan
    for sparse in (dense.to_sparse(),
                   dense.to_sparse(kind='integer'),
                   dense.to_sparse(fill_value=0)):
        self._check_double_roundtrip(sparse, tm.assert_frame_equal,
                                     check_frame_type=True)
def test_sparse_panel(self):
    """Double round-trip sparse panels: block kind, integer kind, fill_value=0."""
    labels = ['x', 'y', 'z']
    dense = Panel(dict((name, tm.makeDataFrame().ix[:2, :2]) for name in labels))
    for sparse in (dense.to_sparse(),
                   dense.to_sparse(kind='integer'),
                   dense.to_sparse(fill_value=0)):
        self._check_double_roundtrip(sparse, assert_panel_equal,
                                     check_panel_type=True)
def test_float_index(self):
    """A Series with a float-valued index round-trips (GH #454)."""
    float_index = np.random.randn(10)
    series = Series(np.random.randn(10), index=float_index)
    self._check_roundtrip(series, tm.assert_series_equal)
def test_tuple_index(self):
    """A frame indexed by tuples round-trips, with a warning (GH #492)."""
    columns = np.arange(10)
    tuples = [(0., 1.), (2., 3.), (4., 5.)]
    frame = DataFrame(np.random.randn(30).reshape((3, 10)),
                      index=tuples, columns=columns)
    # expected warning class differs on Python 3.5
    warn_cls = Warning if compat.PY35 else PerformanceWarning
    with tm.assert_produces_warning(expected_warning=warn_cls,
                                    check_stacklevel=False):
        self._check_roundtrip(frame, tm.assert_frame_equal)
def test_index_types(self):
    """Series with heterogeneous/object indexes round-trip; mixed-type
    indexes warn (PerformanceWarning, or any Warning on py3.5)."""
    values = np.random.randn(2)
    # strict comparator: dtype, index type and series type must all match
    func = lambda l, r: tm.assert_series_equal(l, r,
                                               check_dtype=True,
                                               check_index_type=True,
                                               check_series_type=True)
    # nose has a deprecation warning in 3.5
    expected_warning = Warning if compat.PY35 else PerformanceWarning
    with tm.assert_produces_warning(expected_warning=expected_warning, check_stacklevel=False):
        ser = Series(values, [0, 'y'])
        self._check_roundtrip(ser, func)
    with tm.assert_produces_warning(expected_warning=expected_warning, check_stacklevel=False):
        ser = Series(values, [datetime.datetime.today(), 0])
        self._check_roundtrip(ser, func)
    with tm.assert_produces_warning(expected_warning=expected_warning, check_stacklevel=False):
        ser = Series(values, ['y', 0])
        self._check_roundtrip(ser, func)
    with tm.assert_produces_warning(expected_warning=expected_warning, check_stacklevel=False):
        ser = Series(values, [datetime.date.today(), 'a'])
        self._check_roundtrip(ser, func)
    with tm.assert_produces_warning(expected_warning=expected_warning, check_stacklevel=False):
        ser = Series(values, [1.23, 'b'])
        self._check_roundtrip(ser, func)
    # homogeneous indexes below need no warning context
    ser = Series(values, [1, 1.53])
    self._check_roundtrip(ser, func)
    ser = Series(values, [1, 5])
    self._check_roundtrip(ser, func)
    ser = Series(values, [datetime.datetime(
        2012, 1, 1), datetime.datetime(2012, 1, 2)])
    self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
    """A business-day series that predates the Unix epoch round-trips."""
    if sys.version_info[0] == 2 and sys.version_info[1] < 7:
        raise nose.SkipTest("won't work on Python < 2.7")
    bdays = bdate_range('1/1/1940', '1/1/1960')
    series = Series(np.random.randn(len(bdays)), index=bdays)
    try:
        self._check_roundtrip(series, tm.assert_series_equal)
    except OverflowError:
        raise nose.SkipTest('known failer on some windows platforms')
def test_frame(self):
    """DataFrames with NaNs round-trip (table and fixed, with/without
    compression); an unconsolidated frame is consolidated on reload;
    an empty slice round-trips."""
    df = tm.makeDataFrame()
    # put in some random NAs
    df.values[0, 0] = np.nan
    df.values[5, 3] = np.nan
    self._check_roundtrip_table(df, tm.assert_frame_equal)
    self._check_roundtrip(df, tm.assert_frame_equal)
    if not skip_compression:
        self._check_roundtrip_table(df, tm.assert_frame_equal,
                                    compression=True)
        self._check_roundtrip(df, tm.assert_frame_equal,
                              compression=True)
    tdf = tm.makeTimeDataFrame()
    self._check_roundtrip(tdf, tm.assert_frame_equal)
    if not skip_compression:
        self._check_roundtrip(tdf, tm.assert_frame_equal,
                              compression=True)
    with ensure_clean_store(self.path) as store:
        # not consolidated
        df['foo'] = np.random.randn(len(df))
        store['df'] = df
        recons = store['df']
        self.assertTrue(recons._data.is_consolidated())
    # empty
    self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
    """Empty Series and DataFrames (named, index-only, columns-only) round-trip."""
    for series in (Series(), Series(name='myseries')):
        self._check_roundtrip(series, tm.assert_series_equal)
    for frame in (DataFrame(),
                  DataFrame(index=['a', 'b', 'c']),
                  DataFrame(columns=['d', 'e', 'f'])):
        self._check_roundtrip(frame, tm.assert_frame_equal)
def test_empty_series(self):
    """Empty Series of assorted dtypes (including timedelta64/datetime64)
    round-trip unchanged."""
    dtypes = [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']
    for dtype in dtypes:
        self._check_roundtrip(Series(dtype=dtype), tm.assert_series_equal)
def test_can_serialize_dates(self):
    """A frame indexed by plain ``datetime.date`` objects can be stored
    and read back intact."""
    date_index = [stamp.date() for stamp in bdate_range('1/1/2000', '1/30/2000')]
    frame = DataFrame(np.random.randn(len(date_index), 4), index=date_index)
    self._check_roundtrip(frame, tm.assert_frame_equal)
def test_store_hierarchical(self):
    """Frames with a MultiIndex round-trip, including the level names."""
    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                               ['one', 'two', 'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['foo', 'bar'])
    frame = DataFrame(np.random.randn(10, 3), index=index,
                      columns=['A', 'B', 'C'])
    self._check_roundtrip(frame, tm.assert_frame_equal)
    self._check_roundtrip(frame.T, tm.assert_frame_equal)
    self._check_roundtrip(frame['A'], tm.assert_series_equal)
    # check that the names are stored
    with ensure_clean_store(self.path) as store:
        store['frame'] = frame
        recons = store['frame']
        assert(recons.index.names == ('foo', 'bar'))
def test_store_index_name(self):
    """The index name survives a store/retrieve cycle."""
    frame = tm.makeDataFrame()
    frame.index.name = 'foo'
    with ensure_clean_store(self.path) as store:
        store['frame'] = frame
        roundtripped = store['frame']
        assert(roundtripped.index.name == 'foo')
def test_store_series_name(self):
    """A Series' name survives a store/retrieve cycle."""
    frame = tm.makeDataFrame()
    column = frame['A']
    with ensure_clean_store(self.path) as store:
        store['series'] = column
        roundtripped = store['series']
        assert(roundtripped.name == 'A')
def test_store_mixed(self):
    """Mixed-dtype frames (object/bool/int alongside floats) round-trip,
    both as whole frames and as single columns, with and without
    compression."""
    def _make_one():
        # build a consolidated frame containing several block types
        df = tm.makeDataFrame()
        df['obj1'] = 'foo'
        df['obj2'] = 'bar'
        df['bool1'] = df['A'] > 0
        df['bool2'] = df['B'] > 0
        df['int1'] = 1
        df['int2'] = 2
        return df.consolidate()
    df1 = _make_one()
    df2 = _make_one()
    self._check_roundtrip(df1, tm.assert_frame_equal)
    self._check_roundtrip(df2, tm.assert_frame_equal)
    with ensure_clean_store(self.path) as store:
        # overwriting a key with another mixed frame also works
        store['obj'] = df1
        tm.assert_frame_equal(store['obj'], df1)
        store['obj'] = df2
        tm.assert_frame_equal(store['obj'], df2)
    # check that can store Series of all of these types
    self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
    self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
    self._check_roundtrip(df1['int1'], tm.assert_series_equal)
    if not skip_compression:
        self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
                              compression=True)
        self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
                              compression=True)
        self._check_roundtrip(df1['int1'], tm.assert_series_equal,
                              compression=True)
        self._check_roundtrip(df1, tm.assert_frame_equal,
                              compression=True)
def test_wide(self):
    """A Panel round-trips through fixed-format storage."""
    panel = tm.makePanel()
    self._check_roundtrip(panel, assert_panel_equal)
def test_wide_table(self):
    """A Panel round-trips through table-format storage."""
    panel = tm.makePanel()
    self._check_roundtrip_table(panel, assert_panel_equal)
def test_select_with_dups(self):
    """Selecting from a table with duplicate column labels preserves the
    duplicates — within a single dtype, across dtypes, and with the rows
    themselves duplicated by a double append."""
    # single dtypes
    df = DataFrame(np.random.randn(10,4),columns=['A','A','B','B'])
    df.index = date_range('20130101 9:30',periods=10,freq='T')
    with ensure_clean_store(self.path) as store:
        store.append('df',df)
        result = store.select('df')
        expected = df
        assert_frame_equal(result,expected,by_blocks=True)
        result = store.select('df',columns=df.columns)
        expected = df
        assert_frame_equal(result,expected,by_blocks=True)
        result = store.select('df',columns=['A'])
        expected = df.loc[:,['A']]
        assert_frame_equal(result,expected)
    # dups across dtypes
    df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
                 DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
                axis=1)
    df.index = date_range('20130101 9:30',periods=10,freq='T')
    with ensure_clean_store(self.path) as store:
        store.append('df',df)
        result = store.select('df')
        expected = df
        assert_frame_equal(result,expected,by_blocks=True)
        result = store.select('df',columns=df.columns)
        expected = df
        assert_frame_equal(result,expected,by_blocks=True)
        expected = df.loc[:,['A']]
        result = store.select('df',columns=['A'])
        assert_frame_equal(result,expected,by_blocks=True)
        # column order in the selection is honoured
        expected = df.loc[:,['B','A']]
        result = store.select('df',columns=['B','A'])
        assert_frame_equal(result,expected,by_blocks=True)
    # duplicates on both index and columns
    with ensure_clean_store(self.path) as store:
        store.append('df',df)
        store.append('df',df)
        expected = df.loc[:,['B','A']]
        expected = concat([expected, expected])
        result = store.select('df',columns=['B','A'])
        assert_frame_equal(result,expected,by_blocks=True)
def test_wide_table_dups(self):
    """Appending the same Panel to a table twice yields duplicate rows;
    reading the node back emits a DuplicateWarning but still recovers a
    panel equal to the original."""
    wp = tm.makePanel()
    with ensure_clean_store(self.path) as store:
        store.put('panel', wp, format='table')
        store.put('panel', wp, format='table', append=True)
        with tm.assert_produces_warning(expected_warning=DuplicateWarning):
            recons = store['panel']
        assert_panel_equal(recons, wp)
def test_long(self):
    """A long-format frame (Panel.to_frame) round-trips; equality is
    compared after converting both sides back to panel form."""
    def _check(left, right):
        assert_panel_equal(left.to_panel(), right.to_panel())
    wp = tm.makePanel()
    self._check_roundtrip(wp.to_frame(), _check)
    # empty
    # self._check_roundtrip(wp.to_frame()[:0], _check)
def test_longpanel(self):
    # intentionally empty placeholder; kept so the test name remains
    # reserved in the suite
    pass
def test_overwrite_node(self):
    """Assigning a new object to an existing key replaces the stored node,
    even when the pandas type changes (frame -> series)."""
    with ensure_clean_store(self.path) as store:
        store['a'] = tm.makeTimeDataFrame()
        series = tm.makeTimeSeries()
        store['a'] = series
        tm.assert_series_equal(store['a'], series)
def test_sparse_with_compression(self):
    """GH 2931: sparse frames round-trip with and without zlib
    compression, including a column that is entirely the fill value.

    Note: array dimensions must be ints — ``1e3`` is a float and modern
    NumPy rejects non-integer shapes, so ``1000`` is used explicitly.
    """
    # make sparse dataframe
    df = DataFrame(np.random.binomial(n=1, p=.01, size=(1000, 10))).to_sparse(fill_value=0)
    # case 1: store uncompressed
    self._check_double_roundtrip(df, tm.assert_frame_equal,
                                 compression = False,
                                 check_frame_type=True)
    # case 2: store compressed (works)
    self._check_double_roundtrip(df, tm.assert_frame_equal,
                                 compression = 'zlib',
                                 check_frame_type=True)
    # set one series to be completely sparse
    df[0] = np.zeros(1000)
    # case 3: store df with completely sparse series uncompressed
    self._check_double_roundtrip(df, tm.assert_frame_equal,
                                 compression = False,
                                 check_frame_type=True)
    # case 4: try storing df with completely sparse series compressed (fails)
    self._check_double_roundtrip(df, tm.assert_frame_equal,
                                 compression = 'zlib',
                                 check_frame_type=True)
def test_select(self):
    """Basic ``HDFStore.select``: whole-object selection for table and
    fixed formats, selection on the non-indexable axis of a panel, and
    column subsetting on frames with/without data columns."""
    wp = tm.makePanel()
    with ensure_clean_store(self.path) as store:
        # put/select ok
        _maybe_remove(store, 'wp')
        store.put('wp', wp, format='table')
        store.select('wp')
        # non-table ok (where = None)
        _maybe_remove(store, 'wp')
        store.put('wp2', wp)
        store.select('wp2')
        # selection on the non-indexable with a large number of columns
        wp = Panel(
            np.random.randn(100, 100, 100), items=['Item%03d' % i for i in range(100)],
            major_axis=date_range('1/1/2000', periods=100), minor_axis=['E%03d' % i for i in range(100)])
        _maybe_remove(store, 'wp')
        store.append('wp', wp)
        items = ['Item%03d' % i for i in range(80)]
        result = store.select('wp', Term('items=items'))
        expected = wp.reindex(items=items)
        assert_panel_equal(expected, result)
        # selecting non-table with a where
        # self.assertRaises(ValueError, store.select,
        #                   'wp2', ('column', ['A', 'D']))
        # select with columns=
        df = tm.makeTimeDataFrame()
        _maybe_remove(store, 'df')
        store.append('df', df)
        result = store.select('df', columns=['A', 'B'])
        expected = df.reindex(columns=['A', 'B'])
        tm.assert_frame_equal(expected, result)
        # equivalently, as a where clause
        result = store.select('df', [("columns=['A', 'B']")])
        expected = df.reindex(columns=['A', 'B'])
        tm.assert_frame_equal(expected, result)
        # with a data column
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=['A'])
        result = store.select('df', ['A > 0'], columns=['A', 'B'])
        expected = df[df.A > 0].reindex(columns=['A', 'B'])
        tm.assert_frame_equal(expected, result)
        # all a data columns
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=True)
        result = store.select('df', ['A > 0'], columns=['A', 'B'])
        expected = df[df.A > 0].reindex(columns=['A', 'B'])
        tm.assert_frame_equal(expected, result)
        # with a data column, but different columns
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=['A'])
        result = store.select('df', ['A > 0'], columns=['C', 'D'])
        expected = df[df.A > 0].reindex(columns=['C', 'D'])
        tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
    """Where-clause selection over assorted data-column dtypes:
    Timestamps (GH 2637), booleans (GH 2849), integer and float indexes,
    floats containing NaN, and comparison against a numpy scalar
    (GH 11283)."""
    with ensure_clean_store(self.path) as store:
        # with a Timestamp data column (GH #2637)
        df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300)))
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=['ts', 'A'])
        result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
        expected = df[df.ts >= Timestamp('2012-02-01')]
        tm.assert_frame_equal(expected, result)
        # bool columns (GH #2849)
        df = DataFrame(np.random.randn(5,2), columns =['A','B'])
        df['object'] = 'foo'
        df.ix[4:5,'object'] = 'bar'
        df['boolv'] = df['A'] > 0
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns = True)
        # True/False may be spelled as bool, string, or int in the clause
        expected = df[df.boolv == True].reindex(columns=['A','boolv'])
        for v in [True,'true',1]:
            result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])
            tm.assert_frame_equal(expected, result)
        expected = df[df.boolv == False ].reindex(columns=['A','boolv'])
        for v in [False,'false',0]:
            result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])
            tm.assert_frame_equal(expected, result)
        # integer index
        df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
        _maybe_remove(store, 'df_int')
        store.append('df_int', df)
        result = store.select(
            'df_int', [Term("index<10"), Term("columns=['A']")])
        expected = df.reindex(index=list(df.index)[0:10],columns=['A'])
        tm.assert_frame_equal(expected, result)
        # float index
        df = DataFrame(dict(A=np.random.rand(
            20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
        _maybe_remove(store, 'df_float')
        store.append('df_float', df)
        result = store.select(
            'df_float', [Term("index<10.0"), Term("columns=['A']")])
        expected = df.reindex(index=list(df.index)[0:10],columns=['A'])
        tm.assert_frame_equal(expected, result)
    with ensure_clean_store(self.path) as store:
        # floats w/o NaN
        df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')
        df['cols'] = (df['cols']+10).apply(str)
        store.append('df1',df,data_columns=True)
        result = store.select(
            'df1', where='values>2.0')
        expected = df[df['values']>2.0]
        tm.assert_frame_equal(expected, result)
        # floats with NaN
        df.iloc[0] = np.nan
        expected = df[df['values']>2.0]
        # index=False sidesteps the PyTables issue referenced below
        store.append('df2',df,data_columns=True,index=False)
        result = store.select(
            'df2', where='values>2.0')
        tm.assert_frame_equal(expected, result)
        # https://github.com/PyTables/PyTables/issues/282
        # bug in selection when 0th row has a np.nan and an index
        #store.append('df3',df,data_columns=True)
        #result = store.select(
        #    'df3', where='values>2.0')
        #tm.assert_frame_equal(expected, result)
        # not in first position float with NaN ok too
        df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')
        df['cols'] = (df['cols']+10).apply(str)
        df.iloc[1] = np.nan
        expected = df[df['values']>2.0]
        store.append('df4',df,data_columns=True)
        result = store.select(
            'df4', where='values>2.0')
        tm.assert_frame_equal(expected, result)
    # test selection with comparison against numpy scalar
    # GH 11283
    with ensure_clean_store(self.path) as store:
        df = tm.makeDataFrame()
        expected = df[df['A']>0]
        store.append('df', df, data_columns=True)
        np_zero = np.float64(0)  # referenced by name inside the where clause
        result = store.select('df', where=["A>np_zero"])
        tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
    """Where clauses with large ``in`` lists: small and big selectors over
    a string data column, an integer range selector, and an Index-based
    selector over the timestamp column."""
    with ensure_clean_store(self.path) as store:
        df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
                            A=np.random.randn(300),
                            B=range(300),
                            users = ['a']*50 + ['b']*50 + ['c']*100 + ['a%03d' % i for i in range(100)]))
        _maybe_remove(store, 'df')
        store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
        # regular select
        result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
        expected = df[df.ts >= Timestamp('2012-02-01')]
        tm.assert_frame_equal(expected, result)
        # small selector
        result = store.select('df', [Term("ts>=Timestamp('2012-02-01') & users=['a','b','c']")])
        expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(['a','b','c']) ]
        tm.assert_frame_equal(expected, result)
        # big selector along the columns
        selector = [ 'a','b','c' ] + [ 'a%03d' % i for i in range(60) ]
        result = store.select('df', [Term("ts>=Timestamp('2012-02-01')"),Term('users=selector')])
        expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(selector) ]
        tm.assert_frame_equal(expected, result)
        # integer range selector on a data column
        selector = range(100,200)
        result = store.select('df', [Term('B=selector')])
        expected = df[ df.B.isin(selector) ]
        tm.assert_frame_equal(expected, result)
        self.assertEqual(len(result), 100)
        # big selector along the index
        selector = Index(df.ts[0:100].values)
        result = store.select('df', [Term('ts=selector')])
        expected = df[ df.ts.isin(selector.values) ]
        tm.assert_frame_equal(expected, result)
        self.assertEqual(len(result), 100)
def test_select_iterator(self):
    """Chunked/iterator selection: ``iterator=True`` and ``chunksize``
    reassemble to the full result, non-table stores reject iteration,
    ``read_hdf`` supports chunked reads, and ``select_as_multiple``
    iterates consistently."""
    # single table
    with ensure_clean_store(self.path) as store:
        df = tm.makeTimeDataFrame(500)
        _maybe_remove(store, 'df')
        store.append('df', df)
        expected = store.select('df')
        results = [ s for s in store.select('df',iterator=True) ]
        result = concat(results)
        tm.assert_frame_equal(expected, result)
        # 500 rows / chunksize 100 -> exactly 5 chunks
        results = [ s for s in store.select('df',chunksize=100) ]
        self.assertEqual(len(results), 5)
        result = concat(results)
        tm.assert_frame_equal(expected, result)
        results = [ s for s in store.select('df',chunksize=150) ]
        result = concat(results)
        tm.assert_frame_equal(result, expected)
    with ensure_clean_path(self.path) as path:
        # fixed-format (non-table) storage cannot be iterated
        df = tm.makeTimeDataFrame(500)
        df.to_hdf(path,'df_non_table')
        self.assertRaises(TypeError, read_hdf, path,'df_non_table',chunksize=100)
        self.assertRaises(TypeError, read_hdf, path,'df_non_table',iterator=True)
    with ensure_clean_path(self.path) as path:
        df = tm.makeTimeDataFrame(500)
        df.to_hdf(path,'df',format='table')
        results = [ s for s in read_hdf(path,'df',chunksize=100) ]
        result = concat(results)
        self.assertEqual(len(results), 5)
        tm.assert_frame_equal(result, df)
        tm.assert_frame_equal(result, read_hdf(path,'df'))
    # multiple
    with ensure_clean_store(self.path) as store:
        df1 = tm.makeTimeDataFrame(500)
        store.append('df1',df1,data_columns=True)
        df2 = tm.makeTimeDataFrame(500).rename(columns=lambda x: "%s_2" % x)
        df2['foo'] = 'bar'
        store.append('df2',df2)
        df = concat([df1, df2], axis=1)
        # full selection
        expected = store.select_as_multiple(
            ['df1', 'df2'], selector='df1')
        results = [ s for s in store.select_as_multiple(
            ['df1', 'df2'], selector='df1', chunksize=150) ]
        result = concat(results)
        tm.assert_frame_equal(expected, result)
        # where selection
        #expected = store.select_as_multiple(
        #    ['df1', 'df2'], where= Term('A>0'), selector='df1')
        #results = []
        #for s in store.select_as_multiple(
        #    ['df1', 'df2'], where= Term('A>0'), selector='df1', chunksize=25):
        #    results.append(s)
        #result = concat(results)
        #tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self):
    """GH 8014: where clauses spanning the complete index range return
    the full frame, both without iteration and when chunked.

    ``chunksize`` is an explicit int — ``1e4`` is a float and chunk
    counts feed integer range arithmetic; the sibling
    ``test_select_iterator_many_empty_frames`` already uses ``int(1e4)``.
    """
    chunksize = int(1e4)
    # no iterator
    with ensure_clean_store(self.path) as store:
        expected = tm.makeTimeDataFrame(100064, 'S')
        _maybe_remove(store, 'df')
        store.append('df',expected)
        beg_dt = expected.index[0]
        end_dt = expected.index[-1]
        # select w/o iteration and no where clause works
        result = store.select('df')
        tm.assert_frame_equal(expected, result)
        # select w/o iterator and where clause, single term, begin
        # of range, works
        where = "index >= '%s'" % beg_dt
        result = store.select('df',where=where)
        tm.assert_frame_equal(expected, result)
        # select w/o iterator and where clause, single term, end
        # of range, works
        where = "index <= '%s'" % end_dt
        result = store.select('df',where=where)
        tm.assert_frame_equal(expected, result)
        # select w/o iterator and where clause, inclusive range,
        # works
        where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
        result = store.select('df',where=where)
        tm.assert_frame_equal(expected, result)
    # with iterator, full range
    with ensure_clean_store(self.path) as store:
        expected = tm.makeTimeDataFrame(100064, 'S')
        _maybe_remove(store, 'df')
        store.append('df',expected)
        beg_dt = expected.index[0]
        end_dt = expected.index[-1]
        # select w/iterator and no where clause works
        results = [ s for s in store.select('df',chunksize=chunksize) ]
        result = concat(results)
        tm.assert_frame_equal(expected, result)
        # select w/iterator and where clause, single term, begin of range
        where = "index >= '%s'" % beg_dt
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        result = concat(results)
        tm.assert_frame_equal(expected, result)
        # select w/iterator and where clause, single term, end of range
        where = "index <= '%s'" % end_dt
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        result = concat(results)
        tm.assert_frame_equal(expected, result)
        # select w/iterator and where clause, inclusive range
        where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        result = concat(results)
        tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self):
    """GH 8014: chunked selection with where clauses covering a partial
    index range, plus an empty-result where clause.

    ``chunksize`` is an explicit int — ``1e4`` is a float and chunk
    counts feed integer range arithmetic; the sibling
    ``test_select_iterator_many_empty_frames`` already uses ``int(1e4)``.
    """
    chunksize = int(1e4)
    # with iterator, non complete range
    with ensure_clean_store(self.path) as store:
        expected = tm.makeTimeDataFrame(100064, 'S')
        _maybe_remove(store, 'df')
        store.append('df',expected)
        beg_dt = expected.index[1]
        end_dt = expected.index[-2]
        # select w/iterator and where clause, single term, begin of range
        where = "index >= '%s'" % beg_dt
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        result = concat(results)
        rexpected = expected[expected.index >= beg_dt]
        tm.assert_frame_equal(rexpected, result)
        # select w/iterator and where clause, single term, end of range
        where = "index <= '%s'" % end_dt
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        result = concat(results)
        rexpected = expected[expected.index <= end_dt]
        tm.assert_frame_equal(rexpected, result)
        # select w/iterator and where clause, inclusive range
        where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        result = concat(results)
        rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
        tm.assert_frame_equal(rexpected, result)
    # with iterator, empty where
    with ensure_clean_store(self.path) as store:
        expected = tm.makeTimeDataFrame(100064, 'S')
        _maybe_remove(store, 'df')
        store.append('df',expected)
        end_dt = expected.index[-1]
        # a clause strictly past the last index value selects nothing
        where = "index > '%s'" % end_dt
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        self.assertEqual(0, len(results))
def test_select_iterator_many_empty_frames(self):
    """GH 8014: a where clause restricted to the first chunk should yield
    a single frame, not a trail of empty ones; a clause selecting
    nothing should yield no frames at all."""
    chunksize=int(1e4)
    # with iterator, range limited to the first chunk
    with ensure_clean_store(self.path) as store:
        expected = tm.makeTimeDataFrame(100000, 'S')
        _maybe_remove(store, 'df')
        store.append('df',expected)
        beg_dt = expected.index[0]
        end_dt = expected.index[chunksize-1]
        # select w/iterator and where clause, single term, begin of range
        where = "index >= '%s'" % beg_dt
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        result = concat(results)
        rexpected = expected[expected.index >= beg_dt]
        tm.assert_frame_equal(rexpected, result)
        # select w/iterator and where clause, single term, end of range
        where = "index <= '%s'" % end_dt
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        tm.assert_equal(1, len(results))
        result = concat(results)
        rexpected = expected[expected.index <= end_dt]
        tm.assert_frame_equal(rexpected, result)
        # select w/iterator and where clause, inclusive range
        where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        # should be 1, is 10
        tm.assert_equal(1, len(results))
        result = concat(results)
        rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
        tm.assert_frame_equal(rexpected, result)
        # select w/iterator and where clause which selects
        # *nothing*.
        #
        # To be consistent with Python idiom I suggest this should
        # return [] e.g. `for e in []: print True` never prints
        # True.
        where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
        results = [ s for s in store.select('df',where=where,chunksize=chunksize) ]
        # should be []
        tm.assert_equal(0, len(results))
def test_retain_index_attributes(self):
    """GH 3499: index attributes (freq, tz, name) survive table storage;
    appending rows with a conflicting freq warns and clears the stored
    freq, while appending a compatible non-freq index is allowed."""
    df = DataFrame(dict(A = Series(lrange(3),
                                   index=date_range('2000-1-1',periods=3,freq='H'))))
    with ensure_clean_store(self.path) as store:
        _maybe_remove(store,'data')
        store.put('data', df, format='table')
        result = store.get('data')
        tm.assert_frame_equal(df,result)
        # index/columns attributes must match what was stored
        for attr in ['freq','tz','name']:
            for idx in ['index','columns']:
                self.assertEqual(getattr(getattr(df,idx),attr,None),
                                 getattr(getattr(result,idx),attr,None))
        # try to append a table with a different frequency
        with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
            df2 = DataFrame(dict(A = Series(lrange(3),
                                            index=date_range('2002-1-1',periods=3,freq='D'))))
            store.append('data',df2)
        # the conflicting freq is dropped from the stored metadata
        self.assertIsNone(store.get_storer('data').info['index']['freq'])
        # this is ok
        _maybe_remove(store,'df2')
        df2 = DataFrame(dict(A = Series(lrange(3),
                                        index=[Timestamp('20010101'),Timestamp('20010102'),Timestamp('20020101')])))
        store.append('df2',df2)
        df3 = DataFrame(dict(A = Series(lrange(3),index=date_range('2002-1-1',periods=3,freq='D'))))
        store.append('df2',df3)
def test_retain_index_attributes2(self):
    """Same freq/name retention checks via the ``to_hdf``/``read_hdf``
    path: conflicting appends warn, and a conflicting index name is
    reset to None."""
    with ensure_clean_path(self.path) as path:
        # on PY35 the warning class differs — accept a generic Warning there
        expected_warning = Warning if compat.PY35 else AttributeConflictWarning
        with tm.assert_produces_warning(expected_warning=expected_warning, check_stacklevel=False):
            df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H'))))
            df.to_hdf(path,'data',mode='w',append=True)
            df2 = DataFrame(dict(A = Series(lrange(3), index=date_range('2002-1-1',periods=3,freq='D'))))
            df2.to_hdf(path,'data',append=True)
        idx = date_range('2000-1-1',periods=3,freq='H')
        idx.name = 'foo'
        df = DataFrame(dict(A = Series(lrange(3), index=idx)))
        df.to_hdf(path,'data',mode='w',append=True)
        self.assertEqual(read_hdf(path,'data').index.name, 'foo')
        with tm.assert_produces_warning(expected_warning=expected_warning, check_stacklevel=False):
            idx2 = date_range('2001-1-1',periods=3,freq='H')
            idx2.name = 'bar'
            df2 = DataFrame(dict(A = Series(lrange(3), index=idx2)))
            df2.to_hdf(path,'data',append=True)
        # conflicting index names are dropped on append
        self.assertIsNone(read_hdf(path,'data').index.name)
def test_panel_select(self):
    """Select from a stored Panel using criteria on the major (time) and
    minor axes, with both a scoped local variable and literal strings."""
    wp = tm.makePanel()
    with ensure_clean_store(self.path) as store:
        store.put('wp', wp, format='table')
        date = wp.major_axis[len(wp.major_axis) // 2]
        # 'date' is resolved from the local scope by the query engine
        crit1 = ('major_axis>=date')
        crit2 = ("minor_axis=['A', 'D']")
        result = store.select('wp', [crit1, crit2])
        expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
        assert_panel_equal(result, expected)
        result = store.select(
            'wp', ['major_axis>="20000124"', ("minor_axis=['A', 'B']")])
        expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])
        assert_panel_equal(result, expected)
def test_frame_select(self):
    """Frame selection with Term objects and string criteria, including
    local-variable scoping inside a Term and rejection of invalid terms
    on a datetime-indexed table."""
    df = tm.makeTimeDataFrame()
    with ensure_clean_store(self.path) as store:
        store.put('frame', df,format='table')
        date = df.index[len(df) // 2]
        crit1 = Term('index>=date')
        # the Term captured the local variable 'date' in its scope
        self.assertEqual(crit1.env.scope['date'], date)
        crit2 = ("columns=['A', 'D']")
        crit3 = ('columns=A')
        result = store.select('frame', [crit1, crit2])
        expected = df.ix[date:, ['A', 'D']]
        tm.assert_frame_equal(result, expected)
        result = store.select('frame', [crit3])
        expected = df.ix[:, ['A']]
        tm.assert_frame_equal(result, expected)
        # invalid terms
        df = tm.makeTimeDataFrame()
        store.append('df_time', df)
        # comparing a datetime index against a bare int is invalid
        self.assertRaises(
            ValueError, store.select, 'df_time', [Term("index>0")])
        # can't select if not written as table
        # store['frame'] = df
        # self.assertRaises(ValueError, store.select,
        #                   'frame', [crit1, crit2])
def test_frame_select_complex(self):
    """Complex where clauses on a frame: &, |, parenthesised groups,
    negation (supported for column filters, unsupported for value
    comparisons), and ``in`` over columns."""
    # select via complex criteria
    df = tm.makeTimeDataFrame()
    df['string'] = 'foo'
    df.loc[df.index[0:4],'string'] = 'bar'
    with ensure_clean_store(self.path) as store:
        store.put('df', df, format='table', data_columns=['string'])
        # empty
        result = store.select('df', 'index>df.index[3] & string="bar"')
        expected = df.loc[(df.index>df.index[3]) & (df.string=='bar')]
        tm.assert_frame_equal(result, expected)
        result = store.select('df', 'index>df.index[3] & string="foo"')
        expected = df.loc[(df.index>df.index[3]) & (df.string=='foo')]
        tm.assert_frame_equal(result, expected)
        # or
        result = store.select('df', 'index>df.index[3] | string="bar"')
        expected = df.loc[(df.index>df.index[3]) | (df.string=='bar')]
        tm.assert_frame_equal(result, expected)
        result = store.select('df', '(index>df.index[3] & index<=df.index[6]) | string="bar"')
        expected = df.loc[((df.index>df.index[3]) & (df.index<=df.index[6])) | (df.string=='bar')]
        tm.assert_frame_equal(result, expected)
        # invert
        result = store.select('df', 'string!="bar"')
        expected = df.loc[df.string!='bar']
        tm.assert_frame_equal(result, expected)
        # invert not implemented in numexpr :(
        self.assertRaises(NotImplementedError, store.select, 'df', '~(string="bar")')
        # invert ok for filters
        result = store.select('df', "~(columns=['A','B'])")
        expected = df.loc[:,df.columns.difference(['A','B'])]
        tm.assert_frame_equal(result, expected)
        # in
        result = store.select('df', "index>df.index[3] & columns in ['A','B']")
        expected = df.loc[df.index>df.index[3]].reindex(columns=['A','B'])
        tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self):
    """Where clauses referencing enclosing-scope objects: lists, Index
    objects, and attribute expressions like ``selection.index.tolist()``
    all resolve inside the query string, via both ``read_hdf`` and
    ``HDFStore.select``."""
    with ensure_clean_path(['parms.hdf','hist.hdf']) as paths:
        pp, hh = paths
        # use non-trivial selection criteria
        parms = DataFrame({ 'A' : [1,1,2,2,3] })
        parms.to_hdf(pp,'df',mode='w',format='table',data_columns=['A'])
        selection = read_hdf(pp,'df',where='A=[2,3]')
        hist = DataFrame(np.random.randn(25,1),columns=['data'],
                         index=MultiIndex.from_tuples([ (i,j) for i in range(5) for j in range(5) ],
                                                      names=['l1','l2']))
        hist.to_hdf(hh,'df',mode='w',format='table')
        expected = read_hdf(hh,'df',where=Term('l1','=',[2,3,4]))
        # list like
        result = read_hdf(hh,'df',where=Term('l1','=',selection.index.tolist()))
        assert_frame_equal(result, expected)
        l = selection.index.tolist()
        # scope with list like
        store = HDFStore(hh)
        result = store.select('df',where='l1=l')
        assert_frame_equal(result, expected)
        store.close()
        result = read_hdf(hh,'df',where='l1=l')
        assert_frame_equal(result, expected)
        # index
        index = selection.index
        result = read_hdf(hh,'df',where='l1=index')
        assert_frame_equal(result, expected)
        result = read_hdf(hh,'df',where='l1=selection.index')
        assert_frame_equal(result, expected)
        result = read_hdf(hh,'df',where='l1=selection.index.tolist()')
        assert_frame_equal(result, expected)
        result = read_hdf(hh,'df',where='l1=list(selection.index)')
        assert_frame_equal(result, expected)
        # scope with index
        store = HDFStore(hh)
        result = store.select('df',where='l1=index')
        assert_frame_equal(result, expected)
        result = store.select('df',where='l1=selection.index')
        assert_frame_equal(result, expected)
        result = store.select('df',where='l1=selection.index.tolist()')
        assert_frame_equal(result, expected)
        result = store.select('df',where='l1=list(selection.index)')
        assert_frame_equal(result, expected)
        store.close()
def test_invalid_filtering(self):
    """Combining more than one column filter in a where clause raises
    NotImplementedError (only a single filter is supported)."""
    frame = tm.makeTimeDataFrame()
    with ensure_clean_store(self.path) as store:
        store.put('df', frame, format='table')
        # OR of two column filters is not implemented
        self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A'] | columns=['B']")
        # neither is AND, even though it could be computed in theory
        self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
    """GH 2973: ==/!= selection on string data columns (including empty
    strings mapped to NaN) and on integer data columns."""
    with ensure_clean_store(self.path) as store:
        df = tm.makeTimeDataFrame()
        # test string ==/!=
        df['x'] = 'none'
        df.ix[2:7,'x'] = ''
        store.append('df',df,data_columns=['x'])
        result = store.select('df',Term('x=none'))
        expected = df[df.x == 'none']
        assert_frame_equal(result,expected)
        # best-effort: a failing != comparison is reported, not raised
        try:
            result = store.select('df',Term('x!=none'))
            expected = df[df.x != 'none']
            assert_frame_equal(result,expected)
        except Exception as detail:
            com.pprint_thing("[{0}]".format(detail))
            com.pprint_thing(store)
            com.pprint_thing(expected)
        df2 = df.copy()
        df2.loc[df2.x=='','x'] = np.nan
        store.append('df2',df2,data_columns=['x'])
        # != against the stored value matches the NaN rows here
        result = store.select('df2',Term('x!=none'))
        expected = df2[isnull(df2.x)]
        assert_frame_equal(result,expected)
        # int ==/!=
        df['int'] = 1
        df.ix[2:7,'int'] = 2
        store.append('df3',df,data_columns=['int'])
        result = store.select('df3',Term('int=2'))
        expected = df[df.int==2]
        assert_frame_equal(result,expected)
        result = store.select('df3',Term('int!=2'))
        expected = df[df.int!=2]
        assert_frame_equal(result,expected)
def test_read_column(self):
    """``select_column``: errors for missing/non-indexable columns and
    where clauses, reading the index and data columns, NaN handling,
    start/stop slicing, and name preservation (GH 10392)."""
    df = tm.makeTimeDataFrame()
    with ensure_clean_store(self.path) as store:
        _maybe_remove(store, 'df')
        store.append('df', df)
        # error: unknown column
        self.assertRaises(KeyError, store.select_column, 'df', 'foo')
        # error: where clauses are not supported by select_column
        def f():
            store.select_column('df', 'index', where = ['index>5'])
        self.assertRaises(Exception, f)
        # valid
        result = store.select_column('df', 'index')
        tm.assert_almost_equal(result.values, Series(df.index).values)
        self.assertIsInstance(result, Series)
        # not a data indexable column
        self.assertRaises(
            ValueError, store.select_column, 'df', 'values_block_0')
        # a data column
        df2 = df.copy()
        df2['string'] = 'foo'
        store.append('df2', df2, data_columns=['string'])
        result = store.select_column('df2', 'string')
        tm.assert_almost_equal(result.values, df2['string'].values)
        # a data column with NaNs, result excludes the NaNs
        df3 = df.copy()
        df3['string'] = 'foo'
        df3.ix[4:6, 'string'] = np.nan
        store.append('df3', df3, data_columns=['string'])
        result = store.select_column('df3', 'string')
        tm.assert_almost_equal(result.values, df3['string'].values)
        # start/stop behave like python slice bounds (negative allowed)
        result = store.select_column('df3', 'string', start=2)
        tm.assert_almost_equal(result.values, df3['string'].values[2:])
        result = store.select_column('df3', 'string', start=-2)
        tm.assert_almost_equal(result.values, df3['string'].values[-2:])
        result = store.select_column('df3', 'string', stop=2)
        tm.assert_almost_equal(result.values, df3['string'].values[:2])
        result = store.select_column('df3', 'string', stop=-2)
        tm.assert_almost_equal(result.values, df3['string'].values[:-2])
        result = store.select_column('df3', 'string', start=2, stop=-2)
        tm.assert_almost_equal(result.values, df3['string'].values[2:-2])
        # start=-2, stop=2 is an empty slice, mirroring list semantics
        result = store.select_column('df3', 'string', start=-2, stop=2)
        tm.assert_almost_equal(result.values, df3['string'].values[-2:2])
        # GH 10392 - make sure column name is preserved
        df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})
        store.append('df4', df4, data_columns=True)
        expected = df4['B']
        result = store.select_column('df4', 'B')
        tm.assert_series_equal(result, expected)
def test_coordinates(self):
    """Coordinate-based selection.

    Exercises ``select_as_coordinates`` and the ``where`` forms accepted
    by ``select``: an Index of coordinates, an array of locations, a
    boolean mask, a list of row numbers, and start/stop slicing.
    """
    df = tm.makeTimeDataFrame()

    with ensure_clean_store(self.path) as store:
        _maybe_remove(store, 'df')
        store.append('df', df)

        # all rows -> coordinates are simply 0..n-1
        c = store.select_as_coordinates('df')
        assert((c.values == np.arange(len(df.index))).all() == True)

        # get coordinates back & test vs frame
        _maybe_remove(store, 'df')

        df = DataFrame(dict(A=lrange(5), B=lrange(5)))
        store.append('df', df)
        c = store.select_as_coordinates('df', ['index<3'])
        assert((c.values == np.arange(3)).all() == True)
        result = store.select('df', where=c)
        expected = df.ix[0:2, :]
        tm.assert_frame_equal(result, expected)

        c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
        assert((c.values == np.arange(2) + 3).all() == True)
        result = store.select('df', where=c)
        expected = df.ix[3:4, :]
        tm.assert_frame_equal(result, expected)
        self.assertIsInstance(c, Index)

        # multiple tables: one selector's coordinates drive both selects
        _maybe_remove(store, 'df1')
        _maybe_remove(store, 'df2')
        df1 = tm.makeTimeDataFrame()
        df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
        store.append('df1', df1, data_columns=['A', 'B'])
        store.append('df2', df2)

        c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
        df1_result = store.select('df1', c)
        df2_result = store.select('df2', c)
        result = concat([df1_result, df2_result], axis=1)

        expected = concat([df1, df2], axis=1)
        expected = expected[(expected.A > 0) & (expected.B > 0)]
        tm.assert_frame_equal(result, expected)

    # pass array/mask as the coordinates
    with ensure_clean_store(self.path) as store:

        df = DataFrame(np.random.randn(1000,2),index=date_range('20000101',periods=1000))
        store.append('df',df)
        c = store.select_column('df','index')
        where = c[DatetimeIndex(c).month==5].index
        expected = df.iloc[where]

        # locations
        result = store.select('df',where=where)
        tm.assert_frame_equal(result,expected)

        # boolean
        # NOTE(review): this repeats the call above with the same
        # location-based ``where``; an actual boolean mask is only
        # exercised further below with df2 — confirm intent
        result = store.select('df',where=where)
        tm.assert_frame_equal(result,expected)

        # invalid ``where`` arrays: wrong dtype, wrong length, or
        # combined with start/stop
        self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df),dtype='float64'))
        self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)+1))
        self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5)
        self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5,stop=10)

        # selection with filter
        selection = date_range('20000101',periods=500)
        result = store.select('df', where='index in selection')
        expected = df[df.index.isin(selection)]
        tm.assert_frame_equal(result,expected)

        # list of row numbers
        df = DataFrame(np.random.randn(10,2))
        store.append('df2',df)
        result = store.select('df2',where=[0,3,5])
        expected = df.iloc[[0,3,5]]
        tm.assert_frame_equal(result,expected)

        # boolean mask
        where = [True] * 10
        where[-2] = False
        result = store.select('df2',where=where)
        expected = df.loc[where]
        tm.assert_frame_equal(result,expected)

        # start/stop
        result = store.select('df2', start=5, stop=10)
        expected = df[5:10]
        tm.assert_frame_equal(result,expected)
def test_append_to_multiple(self):
    """append_to_multiple: splits one frame across several tables.

    Checks argument validation (a selector must be one of the target
    tables and columns must not all be None) and that a subsequent
    ``select_as_multiple`` reassembles the selected rows.
    """
    df1 = tm.makeTimeDataFrame()
    df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
    df2['foo'] = 'bar'
    df = concat([df1, df2], axis=1)

    with ensure_clean_store(self.path) as store:

        # exceptions
        self.assertRaises(ValueError, store.append_to_multiple,
                          {'df1': ['A', 'B'], 'df2': None}, df, selector='df3')
        self.assertRaises(ValueError, store.append_to_multiple,
                          {'df1': None, 'df2': None}, df, selector='df3')
        self.assertRaises(
            ValueError, store.append_to_multiple, 'df1', df, 'df1')

        # regular operation
        store.append_to_multiple(
            {'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
        result = store.select_as_multiple(
            ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
        expected = df[(df.A > 0) & (df.B > 0)]
        tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self):
    """append_to_multiple with dropna.

    ``dropna=True`` must keep the split tables' row indexes in sync
    (rows with any NaN dropped from all tables); ``dropna=False`` may
    leave them out of sync, after which ``select_as_multiple`` raises.
    """
    df1 = tm.makeTimeDataFrame()
    df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
    df1.ix[1, ['A', 'B']] = np.nan
    df = concat([df1, df2], axis=1)

    with ensure_clean_store(self.path) as store:
        # dropna=True should guarantee rows are synchronized
        store.append_to_multiple(
            {'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
            dropna=True)
        result = store.select_as_multiple(['df1', 'df2'])
        expected = df.dropna()
        tm.assert_frame_equal(result, expected)
        tm.assert_index_equal(store.select('df1').index,
                              store.select('df2').index)

        # dropna=False shouldn't synchronize row indexes
        store.append_to_multiple(
            {'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
            dropna=False)
        self.assertRaises(
            ValueError, store.select_as_multiple, ['df1', 'df2'])
        assert not store.select('df1').index.equals(
            store.select('df2').index)
def test_select_as_multiple(self):
    """select_as_multiple across several tables.

    Covers argument validation, equivalence with a plain ``select`` on
    a single table, joining two tables using one as a selector, and the
    requirement that all joined tables have the same row count.
    """
    df1 = tm.makeTimeDataFrame()
    df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
    df2['foo'] = 'bar'

    with ensure_clean_store(self.path) as store:

        # no tables stored
        self.assertRaises(Exception, store.select_as_multiple,
                          None, where=['A>0', 'B>0'], selector='df1')

        store.append('df1', df1, data_columns=['A', 'B'])
        store.append('df2', df2)

        # exceptions: missing/None table lists, unknown tables, unknown selector
        self.assertRaises(Exception, store.select_as_multiple,
                          None, where=['A>0', 'B>0'], selector='df1')

        self.assertRaises(Exception, store.select_as_multiple,
                          [None], where=['A>0', 'B>0'], selector='df1')

        self.assertRaises(KeyError, store.select_as_multiple,
                          ['df1','df3'], where=['A>0', 'B>0'], selector='df1')

        self.assertRaises(KeyError, store.select_as_multiple,
                          ['df3'], where=['A>0', 'B>0'], selector='df1')

        self.assertRaises(KeyError, store.select_as_multiple,
                          ['df1','df2'], where=['A>0', 'B>0'], selector='df4')

        # default select: single-table select_as_multiple (list or str)
        # matches a plain select
        result = store.select('df1', ['A>0', 'B>0'])
        expected = store.select_as_multiple(
            ['df1'], where=['A>0', 'B>0'], selector='df1')
        tm.assert_frame_equal(result, expected)
        expected = store.select_as_multiple(
            'df1', where=['A>0', 'B>0'], selector='df1')
        tm.assert_frame_equal(result, expected)

        # multiple
        result = store.select_as_multiple(
            ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
        expected = concat([df1, df2], axis=1)
        expected = expected[(expected.A > 0) & (expected.B > 0)]
        tm.assert_frame_equal(result, expected)

        # multiple (diff selector)
        result = store.select_as_multiple(['df1', 'df2'], where=[Term(
            'index>df2.index[4]')], selector='df2')
        expected = concat([df1, df2], axis=1)
        expected = expected[5:]
        tm.assert_frame_equal(result, expected)

        # test exception for tables with differing row counts
        store.append('df3', tm.makeTimeDataFrame(nper=50))
        self.assertRaises(ValueError, store.select_as_multiple,
                          ['df1','df3'], where=['A>0', 'B>0'], selector='df1')
def test_nan_selection_bug_4858(self):
    """GH 4858: selecting on a numeric data column with NaNs.

    The NaN row must not match ``values>2.0``. The underlying fix
    requires pytables >= 3.1, hence the version-gated skip.
    """
    if LooseVersion(tables.__version__) < '3.1.0':
        raise nose.SkipTest('tables version does not support fix for nan selection bug: GH 4858')

    with ensure_clean_store(self.path) as store:

        df = DataFrame(dict(cols = range(6), values = range(6)), dtype='float64')
        df['cols'] = (df['cols']+10).apply(str)
        df.iloc[0] = np.nan
        expected = DataFrame(dict(cols = ['13.0','14.0','15.0'], values = [3.,4.,5.]), index=[3,4,5])

        # write w/o the index on that particular column
        store.append('df',df, data_columns=True,index=['cols'])
        result = store.select('df',where='values>2.0')
        assert_frame_equal(result,expected)
def test_start_stop(self):
    """select with explicit start/stop row bounds, including a window
    entirely past the end of the table."""
    with ensure_clean_store(self.path) as store:

        frame = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
        store.append('df', frame)

        # an in-range window returns the matching slice of column A
        selected = store.select(
            'df', [Term("columns=['A']")], start=0, stop=5)
        tm.assert_frame_equal(selected, frame.ix[0:4, ['A']])

        # out of range: empty frame, but still a DataFrame
        selected = store.select(
            'df', [Term("columns=['A']")], start=30, stop=40)
        assert(len(selected) == 0)
        assert(type(selected) == DataFrame)
def test_select_filter_corner(self):
    """Column-filter Terms that reference a local frame's columns
    (plain prefix and a strided prefix)."""
    # NOTE: the local must be named ``df`` — the Term strings below
    # resolve it from this function's scope at query time
    df = DataFrame(np.random.randn(50, 100))
    df.index = ['%.3d' % c for c in df.index]
    df.columns = ['%.3d' % c for c in df.columns]

    with ensure_clean_store(self.path) as store:
        store.put('frame', df, format='table')

        for spec, cols in (('columns=df.columns[:75]', df.columns[:75]),
                           ('columns=df.columns[:75:2]', df.columns[:75:2])):
            result = store.select('frame', [Term(spec)])
            tm.assert_frame_equal(result, df.ix[:, cols])
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
    """Write *obj* into a fresh store, read it back, and compare with
    *comparator*; optionally enable the default compressor."""
    options = {'complib': _default_compressor} if compression else {}
    with ensure_clean_store(self.path, 'w', **options) as store:
        store['obj'] = obj
        comparator(store['obj'], obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, compression=False,
                            **kwargs):
    """Round-trip *obj* through the same store twice and compare each time.

    Fix: the original set ``complib = compression or _default_compressor``,
    but that branch only runs when *compression* is truthy, so passing
    ``compression=True`` stored the boolean ``True`` as the compressor
    name. A string still selects a specific library; any other truthy
    value now falls back to the default compressor, matching the sibling
    ``_check_roundtrip`` helpers.
    """
    options = {}
    if compression:
        options['complib'] = (compression if isinstance(compression, str)
                              else _default_compressor)

    with ensure_clean_store(self.path, 'w', **options) as store:
        store['obj'] = obj
        retrieved = store['obj']
        comparator(retrieved, obj, **kwargs)
        # write the retrieved object back and read it out again
        store['obj'] = retrieved
        again = store['obj']
        comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, compression=False):
    """Round-trip *obj* through a table-format store and compare with
    *comparator*; optionally enable the default compressor."""
    opts = {'complib': _default_compressor} if compression else {}
    with ensure_clean_store(self.path, 'w', **opts) as store:
        store.put('obj', obj, format='table')
        round_tripped = store['obj']
        comparator(round_tripped, obj)
def test_multiple_open_close(self):
    """GH 4409: opening and closing the same HDF file multiple times.

    Covers single open/close, concurrent handles (behavior depends on
    the PyTables file-open policy), nested and double closes, and the
    ClosedFileError raised by every operation on a closed store.
    """

    with ensure_clean_path(self.path) as path:

        df = tm.makeDataFrame()
        df.to_hdf(path,'df',mode='w',format='table')

        # single open/close flips the repr and is_open flag
        store = HDFStore(path)
        self.assertNotIn('CLOSED', str(store))
        self.assertTrue(store.is_open)
        store.close()
        self.assertIn('CLOSED', str(store))
        self.assertFalse(store.is_open)

    with ensure_clean_path(self.path) as path:

        if pytables._table_file_open_policy_is_strict:

            # multiples: strict policy forbids a second open handle
            store1 = HDFStore(path)
            def f():
                HDFStore(path)
            self.assertRaises(ValueError, f)
            store1.close()

        else:

            # multiples: both handles open independently and each
            # close only affects its own handle
            store1 = HDFStore(path)
            store2 = HDFStore(path)

            self.assertNotIn('CLOSED', str(store1))
            self.assertNotIn('CLOSED', str(store2))
            self.assertTrue(store1.is_open)
            self.assertTrue(store2.is_open)

            store1.close()
            self.assertIn('CLOSED', str(store1))
            self.assertFalse(store1.is_open)
            self.assertNotIn('CLOSED', str(store2))
            self.assertTrue(store2.is_open)

            store2.close()
            self.assertIn('CLOSED', str(store1))
            self.assertIn('CLOSED', str(store2))
            self.assertFalse(store1.is_open)
            self.assertFalse(store2.is_open)

            # nested close
            store = HDFStore(path,mode='w')
            store.append('df',df)

            store2 = HDFStore(path)
            store2.append('df2',df)
            store2.close()
            self.assertIn('CLOSED', str(store2))
            self.assertFalse(store2.is_open)

            store.close()
            self.assertIn('CLOSED', str(store))
            self.assertFalse(store.is_open)

            # double closing
            store = HDFStore(path,mode='w')
            store.append('df', df)

            store2 = HDFStore(path)
            store.close()
            self.assertIn('CLOSED', str(store))
            self.assertFalse(store.is_open)

            store2.close()
            self.assertIn('CLOSED', str(store2))
            self.assertFalse(store2.is_open)

    # ops on a closed store must all raise ClosedFileError
    with ensure_clean_path(self.path) as path:

        df = tm.makeDataFrame()
        df.to_hdf(path,'df',mode='w',format='table')

        store = HDFStore(path)
        store.close()

        self.assertRaises(ClosedFileError, store.keys)
        self.assertRaises(ClosedFileError, lambda : 'df' in store)
        self.assertRaises(ClosedFileError, lambda : len(store))
        self.assertRaises(ClosedFileError, lambda : store['df'])
        self.assertRaises(ClosedFileError, lambda : store.df)
        self.assertRaises(ClosedFileError, store.select, 'df')
        self.assertRaises(ClosedFileError, store.get, 'df')
        self.assertRaises(ClosedFileError, store.append, 'df2', df)
        self.assertRaises(ClosedFileError, store.put, 'df3', df)
        self.assertRaises(ClosedFileError, store.get_storer, 'df2')
        self.assertRaises(ClosedFileError, store.remove, 'df2')

        def f():
            store.select('df')
        tm.assertRaisesRegexp(ClosedFileError, 'file is not open', f)
def test_pytables_native_read(self):
    """Files written natively by PyTables still read back as DataFrames."""
    with ensure_clean_store(tm.get_data_path('legacy_hdf/pytables_native.h5'), mode='r') as store:
        readout = store['detector/readout']
        self.assertIsInstance(readout, DataFrame)

    with ensure_clean_store(tm.get_data_path('legacy_hdf/pytables_native2.h5'), mode='r') as store:
        str(store)  # the repr of a native file must not blow up
        detector = store['detector']
        self.assertIsInstance(detector, DataFrame)
def test_legacy_read(self):
    """Every object stored in the pre-0.10 legacy file is still loadable."""
    with ensure_clean_store(tm.get_data_path('legacy_hdf/legacy.h5'), mode='r') as store:
        for key in ['a', 'b', 'c', 'd']:
            store[key]
def test_legacy_table_read(self):
    """Reading legacy table types from an old-format HDF file.

    Checks plain selects, the ``typ='legacy_frame'`` escape hatch, the
    IncompatibilityWarning raised when querying an old-version table
    with a Term, and a Term query against a legacy frame.
    """
    # legacy table types
    with ensure_clean_store(tm.get_data_path('legacy_hdf/legacy_table.h5'), mode='r') as store:
        store.select('df1')
        store.select('df2')
        store.select('wp1')

        # force the frame
        store.select('df2', typ='legacy_frame')

        # old version warning
        with tm.assert_produces_warning(expected_warning=IncompatibilityWarning):
            self.assertRaises(
                Exception, store.select, 'wp1', Term('minor_axis=B'))

            df2 = store.select('df2')
            result = store.select('df2', Term('index>df2.index[2]'))
            expected = df2[df2.index > df2.index[2]]
            assert_frame_equal(expected, result)
def test_legacy_0_10_read(self):
    """Every key in a 0.10-era legacy file is still selectable."""
    legacy_path = tm.get_data_path('legacy_hdf/legacy_0.10.h5')
    with ensure_clean_store(legacy_path, mode='r') as store:
        str(store)  # repr must not raise
        for key in store.keys():
            store.select(key)
def test_legacy_0_11_read(self):
    """0.11-era tables — including a MultiIndex frame — read back as
    DataFrames."""
    path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
    with ensure_clean_store(tm.get_data_path(path), mode='r') as store:
        str(store)  # repr must not raise
        for key in ('df', 'df1', 'mi'):
            assert key in store
        for key in ('df', 'df1', 'mi'):
            assert isinstance(store.select(key), DataFrame)
def test_copy(self):
    """HDFStore.copy preserves keys, row counts and (optionally) indexes.

    Fix: the cleanup code could raise ``NameError`` and mask the real
    failure — ``tstore``/``fd`` were unbound in the ``finally`` block if
    ``HDFStore(f)`` or ``store.copy`` raised, and ``path`` was unbound
    if ``create_tempfile`` raised. All names touched during cleanup are
    now pre-bound and guarded.
    """

    def do_copy(f=None, new_f=None, keys=None, propindexes=True, **kwargs):
        # pre-bind everything the finally block touches so a failure
        # during open/copy cannot raise NameError during cleanup
        store = tstore = fd = None
        try:
            if f is None:
                f = tm.get_data_path(os.path.join('legacy_hdf',
                                                  'legacy_0.10.h5'))

            store = HDFStore(f, 'r')

            if new_f is None:
                import tempfile
                fd, new_f = tempfile.mkstemp()
            tstore = store.copy(new_f, keys=keys,
                                propindexes=propindexes, **kwargs)

            # check keys
            if keys is None:
                keys = store.keys()
            self.assertEqual(set(keys), set(tstore.keys()))

            # check indicies & nrows
            for k in tstore.keys():
                if tstore.get_storer(k).is_table:
                    new_t = tstore.get_storer(k)
                    orig_t = store.get_storer(k)

                    self.assertEqual(orig_t.nrows, new_t.nrows)

                    # check propindixes
                    if propindexes:
                        for a in orig_t.axes:
                            if a.is_indexed:
                                self.assertTrue(new_t[a.name].is_indexed)

        finally:
            if store is not None:
                safe_close(store)
            if tstore is not None:
                safe_close(tstore)
            if fd is not None:
                try:
                    os.close(fd)
                except (OSError, IOError):
                    pass
            safe_remove(new_f)

    do_copy()
    do_copy(keys=['/a', '/b', '/df1_mixed'])
    do_copy(propindexes=False)

    # new table
    df = tm.makeDataFrame()

    path = None
    try:
        path = create_tempfile(self.path)

        st = HDFStore(path)
        st.append('df', df, data_columns=['A'])
        st.close()
        do_copy(f=path)
        do_copy(f=path, propindexes=False)
    finally:
        if path is not None:
            safe_remove(path)
def test_legacy_table_write(self):
    """Writer for the legacy-table fixture files.

    Permanently skipped: legacy tables can no longer be written. The
    code below is unreachable and is kept only as a record of how the
    legacy fixture files were produced.
    """
    raise nose.SkipTest("cannot write legacy tables")

    store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')

    df = tm.makeDataFrame()
    wp = tm.makePanel()

    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                               ['one', 'two', 'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['foo', 'bar'])
    df = DataFrame(np.random.randn(10, 3), index=index,
                   columns=['A', 'B', 'C'])
    store.append('mi', df)

    df = DataFrame(dict(A = 'foo', B = 'bar'),index=lrange(10))
    store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
    store.append('wp', wp)

    store.close()
def test_store_datetime_fractional_secs(self):
    """Microsecond precision of a datetime index value survives storage."""
    with ensure_clean_store(self.path) as store:
        stamp = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
        store['a'] = Series([0], [stamp])
        self.assertEqual(store['a'].index[0], stamp)
def test_tseries_indices_series(self):
    """Series with a DatetimeIndex or PeriodIndex keep the index type
    and frequency across a store round-trip."""
    with ensure_clean_store(self.path) as store:
        for make_index in (tm.makeDateIndex, tm.makePeriodIndex):
            idx = make_index(10)
            ser = Series(np.random.randn(len(idx)), idx)
            store['a'] = ser
            result = store['a']

            assert_series_equal(result, ser)
            self.assertEqual(type(result.index), type(ser.index))
            self.assertEqual(result.index.freq, ser.index.freq)
def test_tseries_indices_frame(self):
    """Frames with a DatetimeIndex or PeriodIndex keep the index type
    and frequency across a store round-trip."""
    with ensure_clean_store(self.path) as store:
        for make_index in (tm.makeDateIndex, tm.makePeriodIndex):
            idx = make_index(10)
            df = DataFrame(np.random.randn(len(idx), 3), index=idx)
            store['a'] = df
            result = store['a']

            assert_frame_equal(result, df)
            self.assertEqual(type(result.index), type(df.index))
            self.assertEqual(result.index.freq, df.index.freq)
def test_unicode_index(self):
    """Round-trip a Series whose index holds non-ascii unicode values.

    The write is expected to emit a PerformanceWarning (asserted via
    ``compat_assert_produces_warning``).
    """
    unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]

    def f():
        s = Series(np.random.randn(len(unicode_values)), unicode_values)
        self._check_roundtrip(s, tm.assert_series_equal)
    compat_assert_produces_warning(PerformanceWarning, f)
def test_unicode_longer_encoded(self):
    """GH 11234: utf-8 table writes where the encoded value is longer
    than the raw string.

    Fix: the literal ``'\\u0394'`` is not a unicode escape on Python 2
    (it is a plain 6-character str), so the test did not exercise the
    multi-byte path there. Wrapping with ``compat.u`` — as the other
    unicode tests in this file do — yields an actual GREEK CAPITAL
    DELTA on both Python 2 and 3 (no-op on Python 3).
    """
    char = u('\u0394')
    df = pd.DataFrame({'A': [char]})
    with ensure_clean_store(self.path) as store:
        store.put('df', df, format='table', encoding='utf-8')
        result = store.get('df')
        tm.assert_frame_equal(result, df)

    # mixed with plain ascii values in a second column
    df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})
    with ensure_clean_store(self.path) as store:
        store.put('df', df, format='table', encoding='utf-8')
        result = store.get('df')
        tm.assert_frame_equal(result, df)
def test_store_datetime_mixed(self):
    """A frame mixing int/float/str columns with a datetime column
    round-trips through the store."""
    frame = DataFrame(
        {'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
    ts = tm.makeTimeSeries()
    frame['d'] = ts.index[:3]
    self._check_roundtrip(frame, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# self.assertRaises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
    """Appending a frame whose column labels differ in type from the
    stored table must raise ValueError."""
    df = DataFrame(np.random.randn(10, 1))
    incompatible = (
        DataFrame({'a': np.random.randn(10)}),
        DataFrame({(1, 2): np.random.randn(10)}),
        DataFrame({('1', 2): np.random.randn(10)}),
        DataFrame({('1', 2, object): np.random.randn(10)}),
    )

    with ensure_clean_store(self.path) as store:
        name = 'df_%s' % tm.rands(10)
        store.append(name, df)

        for other in incompatible:
            with tm.assertRaises(ValueError):
                store.append(name, other)
def test_query_with_nested_special_character(self):
    """An '&' inside a quoted query value is matched literally, not
    parsed as a boolean operator."""
    frame = DataFrame({'a': ['a', 'a', 'c', 'b', 'test & test', 'c' , 'b', 'e'],
                       'b': [1, 2, 3, 4, 5, 6, 7, 8]})
    wanted = frame[frame.a == 'test & test']
    with ensure_clean_store(self.path) as store:
        store.append('test', frame, format='table', data_columns=True)
        found = store.select('test', 'a = "test & test"')
        tm.assert_frame_equal(wanted, found)
def test_categorical(self):
    """Categorical round-trips through table format.

    Covers ordered/unordered Series, categoricals inside frames,
    integer/NaN categories, the per-column metadata nodes, querying on
    a categorical data column, appends with matching vs. mismatched
    categories, and recursive removal of the metadata.
    """

    with ensure_clean_store(self.path) as store:

        # basic
        _maybe_remove(store, 's')
        s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'], ordered=False))
        store.append('s', s, format='table')
        result = store.select('s')
        tm.assert_series_equal(s, result)

        _maybe_remove(store, 's_ordered')
        s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'], ordered=True))
        store.append('s_ordered', s, format='table')
        result = store.select('s_ordered')
        tm.assert_series_equal(s, result)

        _maybe_remove(store, 'df')

        df = DataFrame({"s":s, "vals":[1,2,3,4,5,6]})
        store.append('df', df, format='table')
        result = store.select('df')
        tm.assert_frame_equal(result, df)

        # dtypes: integer categories, with and without NaN
        s = Series([1,1,2,2,3,4,5]).astype('category')
        store.append('si',s)
        result = store.select('si')
        tm.assert_series_equal(result, s)

        s = Series([1,1,np.nan,2,3,4,5]).astype('category')
        store.append('si2',s)
        result = store.select('si2')
        tm.assert_series_equal(result, s)

        # multiple categorical columns in one frame
        df2 = df.copy()
        df2['s2'] = Series(list('abcdefg')).astype('category')
        store.append('df2',df2)
        result = store.select('df2')
        tm.assert_frame_equal(result, df2)

        # make sure the metadata is ok
        self.assertTrue('/df2   ' in str(store))
        self.assertTrue('/df2/meta/values_block_0/meta' in str(store))
        self.assertTrue('/df2/meta/values_block_1/meta' in str(store))

        # unordered
        s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=['a','b','c','d'],ordered=False))
        store.append('s2', s, format='table')
        result = store.select('s2')
        tm.assert_series_equal(result, s)

        # query on a categorical data column, incl. categories that
        # exist but are unused ('d') and values not in the categories ('f')
        store.append('df3', df, data_columns=['s'])
        expected = df[df.s.isin(['b','c'])]
        result = store.select('df3', where = ['s in ["b","c"]'])
        tm.assert_frame_equal(result, expected)
        expected = df[df.s.isin(['b','c'])]
        result = store.select('df3', where = ['s = ["b","c"]'])
        tm.assert_frame_equal(result, expected)
        expected = df[df.s.isin(['d'])]
        result = store.select('df3', where = ['s in ["d"]'])
        tm.assert_frame_equal(result, expected)
        expected = df[df.s.isin(['f'])]
        result = store.select('df3', where = ['s in ["f"]'])
        tm.assert_frame_equal(result, expected)

        # appending with same categories is ok
        store.append('df3', df)

        df = concat([df,df])
        expected = df[df.s.isin(['b','c'])]
        result = store.select('df3', where = ['s in ["b","c"]'])
        tm.assert_frame_equal(result, expected)

        # appending must have the same categories
        df3 = df.copy()
        df3['s'].cat.remove_unused_categories(inplace=True)

        self.assertRaises(ValueError, lambda : store.append('df3', df3))

        # remove
        # make sure meta data is removed (its a recursive removal so should be)
        result = store.select('df3/meta/s/meta')
        self.assertIsNotNone(result)
        store.remove('df3')
        self.assertRaises(KeyError, lambda : store.select('df3/meta/s/meta'))
def test_duplicate_column_name(self):
    """Duplicate column names: rejected by fixed format, round-trip
    intact via table format."""
    df = DataFrame(columns=["a", "a"], data=[[0, 0]])

    with ensure_clean_path(self.path) as path:
        # fixed format cannot represent duplicate columns
        self.assertRaises(ValueError, df.to_hdf, path, 'df', format='fixed')

        df.to_hdf(path, 'df', format='table')
        recovered = read_hdf(path, 'df')

        tm.assert_frame_equal(df, recovered)
        self.assertTrue(df.equals(recovered))
        self.assertTrue(recovered.equals(df))
def test_round_trip_equals(self):
    """GH 9330: DataFrame.equals holds in both directions after a
    table-format round-trip."""
    df = DataFrame({"B": [1,2], "A": ["x","y"]})

    with ensure_clean_path(self.path) as path:
        df.to_hdf(path, 'df', format='table')
        recovered = read_hdf(path, 'df')

        tm.assert_frame_equal(df, recovered)
        self.assertTrue(df.equals(recovered))
        self.assertTrue(recovered.equals(df))
def test_preserve_timedeltaindex_type(self):
    """GH9635: fixed-format storage keeps a TimedeltaIndex's type."""
    frame = DataFrame(np.random.normal(size=(10,5)))
    frame.index = timedelta_range(start='0s',periods=10,freq='1s',name='example')

    with ensure_clean_store(self.path) as store:
        store['df'] = frame
        assert_frame_equal(store['df'], frame)
def test_colums_multiindex_modified(self):
    """GH 7212: read_hdf must not mutate the passed ``columns`` list
    when selecting from a multi-indexed table.

    (Method name typo 'colums' is historical; kept to preserve the
    test id.)
    """
    # BUG: 7212
    # read_hdf store.select modified the passed columns parameters
    # when multi-indexed.

    df = DataFrame(np.random.rand(4, 5),
                   index=list('abcd'),
                   columns=list('ABCDE'))
    df.index.name = 'letters'
    df = df.set_index(keys='E', append=True)

    data_columns = df.index.names+df.columns.tolist()
    with ensure_clean_path(self.path) as path:
        df.to_hdf(path, 'df',
                  mode='a',
                  append=True,
                  data_columns=data_columns,
                  index=False)
        cols2load = list('BCD')
        # snapshot to prove read_hdf leaves the list untouched
        cols2load_original = list(cols2load)
        df_loaded = read_hdf(path, 'df', columns=cols2load)
        self.assertTrue(cols2load_original == cols2load)
def test_to_hdf_with_object_column_names(self):
    """GH 9057: table format with data_columns only accepts string-like
    column labels.

    Numeric/datetime/timedelta/period column indexes must raise
    ValueError; string (and on py3 unicode / categorical-of-string)
    indexes must write and remain queryable.
    """
    types_should_fail = [ tm.makeIntIndex, tm.makeFloatIndex,
                          tm.makeDateIndex, tm.makeTimedeltaIndex,
                          tm.makePeriodIndex ]
    types_should_run = [ tm.makeStringIndex, tm.makeCategoricalIndex ]

    if compat.PY3:
        types_should_run.append(tm.makeUnicodeIndex)
    else:
        # unicode labels are not supported as data columns on py2
        types_should_fail.append(tm.makeUnicodeIndex)

    for index in types_should_fail:
        df = DataFrame(np.random.randn(10, 2), columns=index(2))
        with ensure_clean_path(self.path) as path:
            with self.assertRaises(ValueError,
                                   msg="cannot have non-object label DataIndexableCol"):
                df.to_hdf(path, 'df', format='table', data_columns=True)

    for index in types_should_run:
        df = DataFrame(np.random.randn(10, 2), columns=index(2))
        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df', format='table', data_columns=True)
            # the written frame must be queryable by its index values
            result = pd.read_hdf(path, 'df', where="index = [{0}]".format(df.index[0]))
            assert(len(result))
def test_read_hdf_open_store(self):
    """GH10330: read_hdf accepts an already-open HDFStore and leaves
    it open afterwards."""
    frame = DataFrame(np.random.rand(4, 5),
                      index=list('abcd'),
                      columns=list('ABCDE'))
    frame.index.name = 'letters'
    frame = frame.set_index(keys='E', append=True)

    with ensure_clean_path(self.path) as path:
        frame.to_hdf(path, 'df', mode='w')
        from_path = read_hdf(path, 'df')
        store = HDFStore(path, mode='r')
        from_store = read_hdf(store, 'df')
        tm.assert_frame_equal(from_path, from_store)
        # reading through an open store must not close it
        self.assertTrue(store.is_open)
        store.close()
def test_read_hdf_iterator(self):
    """read_hdf(iterator=True) yields a TableIterator over the same
    data as a direct read."""
    frame = DataFrame(np.random.rand(4, 5),
                      index=list('abcd'),
                      columns=list('ABCDE'))
    frame.index.name = 'letters'
    frame = frame.set_index(keys='E', append=True)

    with ensure_clean_path(self.path) as path:
        frame.to_hdf(path, 'df', mode='w', format='t')
        direct = read_hdf(path, 'df')
        iterator = read_hdf(path, 'df', iterator=True)
        self.assertTrue(isinstance(iterator, TableIterator))
        first_chunk = next(iter(iterator))
        tm.assert_frame_equal(direct, first_chunk)
        iterator.store.close()
def test_read_hdf_errors(self):
    """Error paths of read_hdf: nonexistent data, a closed store, and
    a plain python file handle."""
    df = DataFrame(np.random.rand(4, 5),
                   index=list('abcd'),
                   columns=list('ABCDE'))

    with ensure_clean_path(self.path) as path:
        # nothing has been written at this path yet
        self.assertRaises(IOError, read_hdf, path, 'key')
        df.to_hdf(path, 'df')
        store = HDFStore(path, mode='r')
        store.close()
        # a closed HDFStore cannot be read from
        self.assertRaises(IOError, read_hdf, store, 'df')
        with open(path, mode='r') as store:
            # a raw file object is not a supported source
            self.assertRaises(NotImplementedError, read_hdf, store, 'df')
def test_invalid_complib(self):
    """An unknown complib specification is rejected with ValueError."""
    frame = DataFrame(np.random.rand(4, 5),
                      index=list('abcd'),
                      columns=list('ABCDE'))
    with ensure_clean_path(self.path) as path:
        self.assertRaises(ValueError, frame.to_hdf, path, 'df', complib='blosc:zlib')
# GH10443
def test_read_nokey(self):
    """GH10443: read_hdf without a key succeeds only while the file
    contains a single object."""
    frame = DataFrame(np.random.rand(4, 5),
                      index=list('abcd'),
                      columns=list('ABCDE'))
    with ensure_clean_path(self.path) as path:
        frame.to_hdf(path, 'df', mode='a')
        assert_frame_equal(frame, read_hdf(path))
        # a second key makes the key-less lookup ambiguous
        frame.to_hdf(path, 'df2', mode='a')
        self.assertRaises(ValueError, read_hdf, path)
class TestHDFComplexValues(Base):
    """GH10447: storing complex-valued data in HDF5.

    Complex64/complex128 values round-trip through both fixed and table
    formats (alone, mixed with other dtypes, and across Series / frame
    / panel dimensions); using a complex column as a data/index column
    is rejected.
    """
    # GH10447

    def test_complex_fixed(self):
        """complex64 and complex128 frames round-trip via fixed format."""
        df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
                       index=list('abcd'),
                       columns=list('ABCDE'))

        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df')
            reread = read_hdf(path, 'df')
            assert_frame_equal(df, reread)

        df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
                       index=list('abcd'),
                       columns=list('ABCDE'))
        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df')
            reread = read_hdf(path, 'df')
            assert_frame_equal(df, reread)

    def test_complex_table(self):
        """complex64 and complex128 frames round-trip via table format."""
        df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
                       index=list('abcd'),
                       columns=list('ABCDE'))

        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df', format='table')
            reread = read_hdf(path, 'df')
            assert_frame_equal(df, reread)

        df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
                       index=list('abcd'),
                       columns=list('ABCDE'))

        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df', format='table', mode='w')
            reread = read_hdf(path, 'df')
            assert_frame_equal(df, reread)

    def test_complex_mixed_fixed(self):
        """Complex columns mixed with int/str/float, fixed format."""
        complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
        complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
                              dtype=np.complex128)
        df = DataFrame({'A': [1, 2, 3, 4],
                        'B': ['a', 'b', 'c', 'd'],
                        'C': complex64,
                        'D': complex128,
                        'E': [1.0, 2.0, 3.0, 4.0]},
                       index=list('abcd'))
        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df')
            reread = read_hdf(path, 'df')
            assert_frame_equal(df, reread)

    def test_complex_mixed_table(self):
        """Complex columns mixed with other dtypes, table format; the
        non-complex data columns remain queryable."""
        complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
        complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
                              dtype=np.complex128)
        df = DataFrame({'A': [1, 2, 3, 4],
                        'B': ['a', 'b', 'c', 'd'],
                        'C': complex64,
                        'D': complex128,
                        'E': [1.0, 2.0, 3.0, 4.0]},
                       index=list('abcd'))

        with ensure_clean_store(self.path) as store:
            store.append('df', df, data_columns=['A', 'B'])
            result = store.select('df', where=Term('A>2'))
            assert_frame_equal(df.loc[df.A > 2], result)

        with ensure_clean_path(self.path) as path:
            df.to_hdf(path, 'df', format='table')
            reread = read_hdf(path, 'df')
            assert_frame_equal(df, reread)

    def test_complex_across_dimensions_fixed(self):
        """Series/DataFrame/Panel of complex128, fixed format."""
        complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
        s = Series(complex128, index=list('abcd'))
        df = DataFrame({'A': s, 'B': s})
        p = Panel({'One': df, 'Two': df})

        objs = [s, df, p]
        comps = [tm.assert_series_equal, tm.assert_frame_equal,
                 tm.assert_panel_equal]
        for obj, comp in zip(objs, comps):
            with ensure_clean_path(self.path) as path:
                obj.to_hdf(path, 'obj', format='fixed')
                reread = read_hdf(path, 'obj')
                comp(obj, reread)

    def test_complex_across_dimensions(self):
        """DataFrame/Panel/Panel4D of complex128, table format.
        (Series is excluded here — see test_complex_series_error.)"""
        complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
        s = Series(complex128, index=list('abcd'))
        df = DataFrame({'A': s, 'B': s})
        p = Panel({'One': df, 'Two': df})
        p4d = pd.Panel4D({'i': p, 'ii': p})

        objs = [df, p, p4d]
        comps = [tm.assert_frame_equal, tm.assert_panel_equal,
                 tm.assert_panel4d_equal]
        for obj, comp in zip(objs, comps):
            with ensure_clean_path(self.path) as path:
                obj.to_hdf(path, 'obj', format='table')
                reread = read_hdf(path, 'obj')
                comp(obj, reread)

    def test_complex_indexing_error(self):
        """A complex column cannot be used as a data (indexable) column."""
        complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
                              dtype=np.complex128)
        df = DataFrame({'A': [1, 2, 3, 4],
                        'B': ['a', 'b', 'c', 'd'],
                        'C': complex128},
                       index=list('abcd'))
        with ensure_clean_store(self.path) as store:
            self.assertRaises(TypeError, store.append, 'df', df, data_columns=['C'])

    def test_complex_series_error(self):
        """A complex Series in table format needs index=False; with the
        default (indexed) write it raises TypeError."""
        complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
        s = Series(complex128, index=list('abcd'))

        with ensure_clean_path(self.path) as path:
            self.assertRaises(TypeError, s.to_hdf, path, 'obj', format='t')

        with ensure_clean_path(self.path) as path:
            s.to_hdf(path, 'obj', format='t', index=False)
            reread = read_hdf(path, 'obj')
            tm.assert_series_equal(s, reread)

    def test_complex_append(self):
        """Appending a second batch of complex data doubles the table."""
        df = DataFrame({'a': np.random.randn(100).astype(np.complex128),
                        'b': np.random.randn(100)})

        with ensure_clean_store(self.path) as store:
            store.append('df', df, data_columns=['b'])
            store.append('df', df)
            result = store.select('df')
            assert_frame_equal(pd.concat([df, df], 0), result)
class TestTimezones(Base, tm.TestCase):
def _compare_with_tz(self, a, b):
    """assert_frame_equal plus an element-wise timezone check.

    Every cell of *a* and *b* is compared for both value and ``tz``;
    an AssertionError is raised on the first mismatch.

    Fix: corrected the typo in the raised error message
    ("comparsion" -> "comparison").
    """
    tm.assert_frame_equal(a, b)

    # compare the zones on each element
    for c in a.columns:
        for i in a.index:
            a_e = a.loc[i, c]
            b_e = b.loc[i, c]
            if not (a_e == b_e and a_e.tz == b_e.tz):
                raise AssertionError(
                    "invalid tz comparison [%s] [%s]" % (a_e, b_e))
def test_append_with_timezones_dateutil(self):
    """tz-aware append/select with dateutil timezones.

    Mirrors the pytz variant below: tz-aware columns round-trip,
    tz-aware Terms select correctly, appending with a different tz
    raises, and tz-aware DatetimeIndex round-trips via put and append.
    """
    from datetime import timedelta
    tm._skip_if_no_dateutil()

    # use maybe_get_tz instead of dateutil.tz.gettz to handle the windows filename issues.
    from pandas.tslib import maybe_get_tz
    gettz = lambda x: maybe_get_tz('dateutil/' + x)

    # as columns
    with ensure_clean_store(self.path) as store:

        _maybe_remove(store, 'df_tz')
        df = DataFrame(dict(A=[ Timestamp('20130102 2:00:00', tz=gettz('US/Eastern')) + timedelta(hours=1) * i for i in range(5) ]))

        store.append('df_tz', df, data_columns=['A'])
        result = store['df_tz']
        self._compare_with_tz(result, df)
        assert_frame_equal(result, df)

        # select with tz aware
        expected = df[df.A >= df.A[3]]
        result = store.select('df_tz', where=Term('A>=df.A[3]'))
        self._compare_with_tz(result, expected)

        # ensure we include dates in DST and STD time here.
        _maybe_remove(store, 'df_tz')
        df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130603', tz=gettz('US/Eastern'))), index=range(5))
        store.append('df_tz', df)
        result = store['df_tz']
        self._compare_with_tz(result, df)
        assert_frame_equal(result, df)

        # mixed timezones in one frame: plain append raises
        df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('EET'))), index=range(5))
        self.assertRaises(ValueError, store.append, 'df_tz', df)

        # this is ok
        _maybe_remove(store, 'df_tz')
        store.append('df_tz', df, data_columns=['A', 'B'])
        result = store['df_tz']
        self._compare_with_tz(result, df)
        assert_frame_equal(result, df)

        # can't append with diff timezone
        df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('CET'))), index=range(5))
        self.assertRaises(ValueError, store.append, 'df_tz', df)

    # as index
    with ensure_clean_store(self.path) as store:

        # GH 4098 example
        df = DataFrame(dict(A=Series(lrange(3), index=date_range('2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))

        _maybe_remove(store, 'df')
        store.put('df', df)
        result = store.select('df')
        assert_frame_equal(result, df)

        _maybe_remove(store, 'df')
        store.append('df', df)
        result = store.select('df')
        assert_frame_equal(result, df)
def test_append_with_timezones_pytz(self):
    """Round-trip pytz timezone-aware data through HDFStore append.

    Covers tz-aware values stored as data columns and as the index,
    selection with a tz-aware Term, and the ValueError raised when
    appending rows whose timezone differs from the stored table.
    """
    from datetime import timedelta
    # as columns
    with ensure_clean_store(self.path) as store:
        _maybe_remove(store, 'df_tz')
        df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ]))
        store.append('df_tz',df,data_columns=['A'])
        result = store['df_tz']
        self._compare_with_tz(result,df)
        assert_frame_equal(result,df)
        # select with tz aware
        self._compare_with_tz(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]])
        _maybe_remove(store, 'df_tz')
        # ensure we include dates in DST and STD time here.
        df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130603',tz='US/Eastern')),index=range(5))
        store.append('df_tz',df)
        result = store['df_tz']
        self._compare_with_tz(result,df)
        assert_frame_equal(result,df)
        # a frame whose columns carry two different timezones cannot be
        # appended onto the existing single-tz table
        df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5))
        self.assertRaises(ValueError, store.append, 'df_tz', df)
        # this is ok
        _maybe_remove(store, 'df_tz')
        store.append('df_tz',df,data_columns=['A','B'])
        result = store['df_tz']
        self._compare_with_tz(result,df)
        assert_frame_equal(result,df)
        # can't append with diff timezone
        df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5))
        self.assertRaises(ValueError, store.append, 'df_tz', df)
    # as index
    with ensure_clean_store(self.path) as store:
        # GH 4098 example
        df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H', tz='US/Eastern'))))
        _maybe_remove(store, 'df')
        store.put('df',df)
        result = store.select('df')
        assert_frame_equal(result,df)
        _maybe_remove(store, 'df')
        store.append('df',df)
        result = store.select('df')
        assert_frame_equal(result,df)
def test_tseries_select_index_column(self):
    """select_column('index') must preserve the tz of a stored index.

    GH7777: selecting a UTC datetimeindex column did not preserve the
    UTC tzinfo set before storing.
    """
    # check that no tz still works
    rng = date_range('1/1/2000', '1/30/2000')
    frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
    with ensure_clean_store(self.path) as store:
        store.append('frame', frame)
        result = store.select_column('frame', 'index')
        self.assertEqual(rng.tz, DatetimeIndex(result.values).tz)
    # check utc
    rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
    frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
    with ensure_clean_store(self.path) as store:
        store.append('frame', frame)
        result = store.select_column('frame', 'index')
        self.assertEqual(rng.tz, result.dt.tz)
    # double check non-utc
    rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
    frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
    with ensure_clean_store(self.path) as store:
        store.append('frame', frame)
        result = store.select_column('frame', 'index')
        self.assertEqual(rng.tz, result.dt.tz)
def test_timezones_fixed(self):
    """Round-trip tz-aware data through the fixed (non-table) format."""
    with ensure_clean_store(self.path) as store:
        # index
        rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
        df = DataFrame(np.random.randn(len(rng), 4), index=rng)
        store['df'] = df
        result = store['df']
        assert_frame_equal(result, df)
        # as data
        # GH11411
        _maybe_remove(store, 'df')
        df = DataFrame({'A' : rng,
                        'B' : rng.tz_convert('UTC').tz_localize(None),
                        'C' : rng.tz_convert('CET'),
                        'D' : range(len(rng))}, index=rng)
        store['df'] = df
        result = store['df']
        assert_frame_equal(result, df)
def test_fixed_offset_tz(self):
    """A fixed-offset timezone (-07:00) survives a fixed-format round trip."""
    rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
    frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
    with ensure_clean_store(self.path) as store:
        store['frame'] = frame
        recons = store['frame']
        self.assertTrue(recons.index.equals(rng))
        self.assertEqual(rng.tz, recons.index.tz)
def test_store_timezone(self):
    """GH2852: a datetime.date index must not shift when the process
    timezone changes between writing and reading the store.
    """
    # issue storing datetime.date with a timezone as it resets when read back in a new timezone
    import platform
    if platform.system() == "Windows":
        # time.tzset() is POSIX-only
        raise nose.SkipTest("timezone setting not supported on windows")
    import datetime
    import time
    import os
    # original method
    with ensure_clean_store(self.path) as store:
        today = datetime.date(2013,9,10)
        df = DataFrame([1,2,3], index = [today, today, today])
        store['obj1'] = df
        result = store['obj1']
        assert_frame_equal(result, df)
    # with tz setting
    orig_tz = os.environ.get('TZ')
    def setTZ(tz):
        # switch the process timezone; tz=None restores the unset state
        if tz is None:
            try:
                del os.environ['TZ']
            except:
                pass
        else:
            os.environ['TZ']=tz
            time.tzset()
    try:
        with ensure_clean_store(self.path) as store:
            setTZ('EST5EDT')
            today = datetime.date(2013,9,10)
            df = DataFrame([1,2,3], index = [today, today, today])
            store['obj1'] = df
            # read the frame back under a different process timezone
            setTZ('CST6CDT')
            result = store['obj1']
            assert_frame_equal(result, df)
    finally:
        # always restore the original TZ so later tests are unaffected
        setTZ(orig_tz)
def test_legacy_datetimetz_object(self):
    """A tz-aware frame written by pandas < 0.17.0 (GH8260) still reads
    back correctly from the bundled legacy HDF file.
    """
    expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), B=Timestamp('20130603', tz='CET')), index=range(5))
    with ensure_clean_store(tm.get_data_path('legacy_hdf/datetimetz_object.h5'), mode='r') as store:
        result = store['df']
        assert_frame_equal(result, expected)
def test_dst_transitions(self):
    """Timestamps straddling a DST transition must round-trip through append."""
    # make sure we are not failing on transitions
    with ensure_clean_store(self.path) as store:
        times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
                              tz="Europe/London",
                              freq="H",
                              ambiguous='infer')
        # exercise both on-the-hour stamps and stamps offset into the hour
        for i in [times, times+pd.Timedelta('10min')]:
            _maybe_remove(store, 'df')
            df = DataFrame({'A' : range(len(i)), 'B' : i }, index=i)
            store.append('df',df)
            result = store.select('df')
            assert_frame_equal(result, df)
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
elif isinstance(obj, Panel):
return obj.reindex(major=sorted(obj.major_axis))
else:
raise ValueError('type not supported here')
if __name__ == '__main__':
    # Run this test module directly under nose: verbose, stop on first
    # failure, and drop into pdb on errors/failures.
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| artistic-2.0 |
rtrwalker/geotecha | geotecha/consolidation/smear_zones.py | 1 | 127140 | # geotecha - A software suite for geotechncial engineering
# Copyright (C) 2018 Rohan T. Walker (rtrwalker@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl.html.
"""Smear zones associated with vertical drain installation.
Smear zone permeability distributions etc.
"""
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
#import cmath
from numpy import log, sqrt
import scipy.special as special
def mu_ideal(n, *args):
    """Return Hansbo's mu parameter for an ideal drain (no smear zone).

    This is the permeability/geometry parameter appearing in equal-strain
    radial consolidation solutions, e.g. u = u0 * exp(-8 * Th / mu).

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    args : anything
        Ignored.  Present only so the call signature is interchangeable
        with the smear-zone variants that also take s and kappa.

    Returns
    -------
    mu : float or ndarray of float
        Smear zone permeability/geometry parameter,
        mu = n**2/(n**2-1)*(ln(n) - 3/4) + (1 - 1/(4*n**2))/(n**2-1).

    Raises
    ------
    ValueError
        If any value of `n` is not greater than 1.

    References
    ----------
    .. [1] Hansbo, S. 1981. 'Consolidation of Fine-Grained Soils by
           Prefabricated Drains'. In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    """
    n = np.asarray(n)
    if np.any(n <= 1):
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(n))))
    n2 = n ** 2
    return n2 / (n2 - 1) * (log(n) - 0.75) + (1 - 1 / (4 * n2)) / (n2 - 1)
def mu_constant(n, s, kap):
    """Return Hansbo's mu parameter for a constant-permeability smear zone.

    mu parameter in equal strain radial consolidation equations, e.g.
    u = u0 * exp(-8*Th/mu).

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).  Must satisfy
        1 <= s <= n.
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to smear zone
        horizontal permeability (kh/ks).  Must be positive.

    Returns
    -------
    mu : float or ndarray of float
        Smear zone permeability/geometry parameter,

        mu = n**2/(n**2-1) * (ln(n/s) + kap*ln(s) - 3/4)
             + s**2/(n**2-1) * (1 - s**2/(4*n**2))
             + kap/(n**2-1) * ((s**4-1)/(4*n**2) - s**2 + 1)

    Raises
    ------
    ValueError
        If n <= 1, s < 1, kap <= 0, or s > n.

    References
    ----------
    .. [1] Hansbo, S. 1981. 'Consolidation of Fine-Grained Soils by
           Prefabricated Drains'. In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    """
    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if np.any(n <= 1.0):
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(n))))
    if np.any(s < 1.0):
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(s))))
    if np.any(kap <= 0.0):
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join(str(v) for v in
                                               np.atleast_1d(kap))))
    if np.any(s > n):
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(
                             ', '.join(str(v) for v in np.atleast_1d(s)),
                             ', '.join(str(v) for v in np.atleast_1d(n))))
    n2 = n ** 2
    denom = n2 - 1
    # undisturbed annulus + log contribution of the smear annulus
    log_part = n2 / denom * (log(n / s) + kap * log(s) - 0.75)
    # geometric corrections from the two annular zones
    smear_part = s ** 2 / denom * (1 - s ** 2 / (4 * n2))
    kap_part = kap / denom * ((s ** 4 - 1) / (4 * n2) - s ** 2 + 1)
    return log_part + smear_part + kap_part
def _sx(n, s):
"""Value of s=r/rw marking the start of overlapping linear smear zones
`s` is usually larger than `n` when considering overlapping smear zones
Parameters
----------
n : float or ndarray of float
Ratio of drain influence radius to drain radius (re/rw).
s : float or ndarray of float
Ratio of smear zone radius to drain radius (rs/rw).
Returns
-------
sx : float or ndarray of float
Value of s=r/rw marking the start of the overlapping zone
Notes
-----
.. math:: \\kappa_X= 1+\\frac{\\kappa-1}{s-1}\\left({s_X-1}\\right)
.. math:: s_X = 2n-s
See Also
--------
mu_overlapping_linear : uses _sx
_kapx : used in mu_overlapping_linear
"""
sx = 2 * n - s
return sx
def _kapx(n, s, kap):
"""Value of kap=kh/ks for overlap part of intersecting linear smear zones
Assumes `s` is greater than `n`.
Parameters
----------
n : float or ndarray of float
Ratio of drain influence radius to drain radius (re/rw).
s : float or ndarray of float
Ratio of smear zone radius to drain radius (rs/rw)
kap : float or ndarray of float.
Ratio of undisturbed horizontal permeability to smear zone
horizontal permeanility (kh / ks).
Returns
-------
kapx : float
Value of kap=kh/ks for overlap part of intersecting linear smear zones
Notes
-----
.. math:: \\kappa_X= 1+\\frac{\\kappa-1}{s-1}\\left({s_X-1}\\right)
.. math:: s_X = 2n-s
See Also
--------
mu_overlapping_linear : uses _kapx
_sx : used in mu_overlapping_linear
"""
sx = _sx(n, s)
kapx = 1 + (kap - 1) / (s - 1) * (sx - 1)
return kapx
def mu_overlapping_linear(n, s, kap):
    """\
    Smear zone parameter for smear zone with overlapping linear permeability

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).  May exceed n
        when adjacent smear zones overlap.
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    The smear zone parameter :math:`\\mu` is given by:

    .. math::
        \\mu_X =
            \\left\\{\\begin{array}{lr}
            \\mu_L\\left({n,s,\\kappa}\\right) & n\\geq s \\\\
            \\frac{\\kappa}{\\kappa_X}\\mu_L
                \\left({n, s_X,\\kappa_x}\\right)
                & \\frac{s+1}{2}<n<s \\\\
            \\frac{\\kappa}{\\kappa_X}\\mu_I
                \\left({n}\\right) & n\\leq \\frac{s+1}{2}
            \\end{array}\\right.

    where :math:`\\mu_L` is the :math:`\\mu` parameter for non_overlapping
    smear zones with linear permeability, :math:`\\mu_I` is the :math:`\\mu`
    parameter for no smear zone, and:

    .. math:: \\kappa_X= 1+\\frac{\\kappa-1}{s-1}\\left({s_X-1}\\right)

    .. math:: s_X = 2n-s

    and:

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: s = \\frac{r_s}{r_w}

    .. math:: \\kappa = \\frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius,
    :math:`r_s` is the smear zone radius, :math:`k_h` is the undisturbed
    horizontal permeability, :math:`k_s` is the smear zone horizontal
    permeability

    See Also
    --------
    mu_linear : :math:`\\mu` for non-overlapping smear zones
    mu_ideal : :math:`\\mu` for ideal drain with no smear zone

    References
    ----------
    .. [1] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.

    """

    def mu_intersecting(n, s, kap):
        """mu for intersecting smear zones that do not completely overlap"""
        # Treat the intersecting region as an equivalent smaller linear
        # smear zone (sx, kapx), then rescale so kappa is still referenced
        # to the permeability at the drain-soil interface.
        sx = _sx(n, s)
        kapx = _kapx(n, s, kap)
        mu = mu_linear(n, sx, kapx) * kap / kapx
        return mu

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)

    if np.any(n <= 1.0):
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(n)])))
    if np.any(s < 1.0):
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap <= 0.0):
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join([str(v) for v in
                                                np.atleast_1d(kap)])))

    # remember whether any input was an array so the return type matches
    is_array = any([isinstance(v, np.ndarray) for v in [n, s, kap]])
    n = np.atleast_1d(n)
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)

    if len([v for v in [n, s] if v.shape == kap.shape]) != 2:
        raise ValueError('n, s, and kap must have the same shape. You have '
                         'lengths for n, s, kap of {}, {}, {}.'.format(
                             len(n), len(s), len(kap)))

    # Classify each element into one of four mutually exclusive regimes;
    # `ideal` (no smear effect) takes precedence over the others.
    ideal = np.isclose(s, 1) | np.isclose(kap, 1)
    normal = (n >= s) & (~ideal)
    # smear zones meet at the drain: whole influence zone is disturbed
    all_disturbed = (2 * n - s <= 1) & (~ideal)
    intersecting = ~(ideal | normal | all_disturbed)

    mu = np.empty_like(n, dtype=float)

    mu[ideal] = mu_ideal(n[ideal])
    mu[normal] = mu_linear(n[normal], s[normal], kap[normal])
    mu[all_disturbed] = kap[all_disturbed] * mu_ideal(n[all_disturbed])
    mu[intersecting] = mu_intersecting(n[intersecting], s[intersecting],
                                       kap[intersecting])

    if is_array:
        return mu
    else:
        return mu[0]
def mu_linear(n, s, kap):
    """Smear zone parameter for smear zone linear variation of permeability

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).  Must be
        strictly less than n here (unlike mu_constant, which allows s == n).
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    For :math:`s\\neq\\kappa`, :math:`\\mu` is given by:

    .. math:: \\mu=\\frac{n^2}{\\left({n^2-1}\\right)}
                \\left[{
                \\ln\\left({\\frac{n}{s}}\\right)
                -\\frac{3}{4}
                +\\frac{s^2}{n^2}\\left({1-\\frac{s^2}{4n^2}}\\right)
                -\\frac{\\kappa}{B}\\ln\\left({\\frac{\\kappa}{s}}\\right)
                +\\frac{\\kappa B}{A^2 n^2}\\left({2-\\frac{B^2}{A^2 n^2}}
                \\right)\\ln\\left({\\kappa}\\right)
                -\\frac{\\kappa\\left({s-1}\\right)}{A n^2}
                \\left\\{
                2
                +\\frac{1}{n^2}
                \\left[
                {\\frac{A-B}{A}\\left({\\frac{1}{A}}-\\frac{s+1}{2}
                \\right)}
                -\\frac{s+1}{2}
                -\\frac{\\left({s-1}\\right)^2}{3}
                \\right]
                \\right\\}
                }\\right]

    and for the special case :math:`s=\\kappa`, :math:`\\mu` is given by:

    .. math:: \\mu=\\frac{n^2}{\\left({n^2-1}\\right)}
                \\left[{
                \\ln\\left({\\frac{n}{s}}\\right)
                -\\frac{3}{4}
                +s-1
                -\\frac{s^2}{n^2}\\left({1-\\frac{s^2}{12n^2}}\\right)
                -\\frac{s}{n^2}\\left({2-\\frac{1}{3n^2}}\\right)
                }\\right]

    where :math:`A` and :math:`B` are:

    .. math:: A=\\frac{\\kappa-1}{s-1}

    .. math:: B=\\frac{s-\\kappa}{s-1}

    and:

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: s = \\frac{r_s}{r_w}

    .. math:: \\kappa = \\frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius,
    :math:`r_s` is the smear zone radius, :math:`k_h` is the undisturbed
    horizontal permeability, :math:`k_s` is the smear zone horizontal
    permeability

    References
    ----------
    .. [1] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.

    """

    def mu_s_neq_kap(n, s, kap):
        """mu for when s != kap"""
        # A and B define the linear permeability variation k(r) across
        # the smear zone; A is its slope, B its intercept term.
        A = (kap - 1) / (s - 1)
        B = (s - kap) / (s - 1)
        term1 = n**2 / (n**2 - 1)
        term2 = (log(n / s) + s ** 2 / (n ** 2) *
                 (1 - s ** 2 / (4 * n ** 2)) - 3 / 4)
        term3 = kap * (1 - s ** 2 / n ** 2)
        term4 = (1 / B * log(s / kap)
                 - 1 / (n ** 2 * A ** 2) * (kap - 1 - B * log(kap)))
        term5 = term2 + term3 * term4
        term6 = 1 / (n ** 2 * B)
        term7 = (s ** 2 * log(s) - (s ** 2 - 1) / 2
                 + 1 / A ** 2 * ((kap ** 2 - 1) / 2 - kap ** 2 * log(kap) + 2 * B * (kap * log(kap) - (kap - 1))))
        term8 = -1 / (n ** 4 * A ** 2)
        term9 = (B / 3 * (s ** 2 - 1) + 2 / 3 * (s ** 2 * kap - 1) - (s ** 2 - 1)
                 + B / A ** 2 * ((kap ** 2 - 1) / 2 - kap ** 2 * log(kap) + 2 * B * (kap * log(kap) - (kap - 1))))
        term10 = kap * (term6 * term7 + term8 * term9)

        mu = term1 * (term5 + term10)
        return mu

    def mu_s_eq_kap(n, s):
        """mu for s == kap"""
        # limiting form: B -> 0 in mu_s_neq_kap, evaluated separately to
        # avoid the 1/B singularity
        term1 = n ** 2 / (n ** 2 - 1)
        term2 = (log(n / s)
                 + s ** 2 / (n ** 2) * (1 - s ** 2 / (4 * n ** 2)) - 3 / 4)
        term3 = (-s / n ** 2 * (1 - s ** 2 / n ** 2) * (s - 1)
                 + (1 - s ** 2 / n ** 2) * (s - 1))
        term4 = (s / n ** 4 * (s ** 2 - 1)
                 - 2 * s / (3 * n ** 4) * (s ** 3 - 1)
                 - (s / n ** 2 - s ** 2 / n ** 2) * (s - 1))
        mu = term1 * (term2 + term3 + term4)
        return mu

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)

    if np.any(n<=1.0):
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(n)])))
    if np.any(s<1.0):
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap<=0.0):
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    # note: s == n is rejected here (strict inequality), unlike mu_constant
    if np.any(s>=n):
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(
                             ', '.join([str(v) for v in np.atleast_1d(s)]),
                             ', '.join([str(v) for v in np.atleast_1d(n)])))

    # remember whether any input was an array so the return type matches
    is_array = any([isinstance(v, np.ndarray) for v in [n, s, kap]])
    n = np.atleast_1d(n)
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)

    if len([v for v in [n, s] if v.shape==kap.shape])!=2:
        raise ValueError('n, s, and kap must have the same shape. You have '
                         'lengths for n, s, kap of {}, {}, {}.'.format(
                             len(n), len(s), len(kap)))

    mu = np.empty_like(n, dtype=float)

    # dispatch element-wise to the applicable closed-form expression
    ideal = np.isclose(s, 1) | np.isclose(kap, 1)
    s_eq_kap = np.isclose(s, kap) & ~ideal
    s_neq_kap = ~np.isclose(s, kap) & ~ideal

    mu[ideal] = mu_ideal(n[ideal])
    mu[s_eq_kap] = mu_s_eq_kap(n[s_eq_kap], s[s_eq_kap])
    mu[s_neq_kap] = mu_s_neq_kap(n[s_neq_kap], s[s_neq_kap], kap[s_neq_kap])

    if is_array:
        return mu
    else:
        return mu[0]
def mu_parabolic(n, s, kap):
    """Smear zone parameter for parabolic variation of permeability

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh/ks).

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter

    Notes
    -----
    The smear zone parameter :math:`\\mu` is given by:

    .. math:: \\mu = \\frac{n^2}{\\left({n^2-1}\\right)}
                \\left({
                \\frac{A^2}{n^2}\\mu_1+\\mu_2
                }\\right)

    where,

    .. math:: \\mu_1=
                \\frac{1}{A^2-B^2}
                \\left({
                s^2\\ln\\left({s}\\right)
                -\\frac{1}{2}\\left({s^2-1}\\right)
                }\\right)
                -\\frac{1}{\\left({A^2-B^2}\\right)C^2}
                \\left({
                \\frac{A^2}{2}\\ln\\left({\\kappa}\\right)
                +\\frac{ABE}{2}+\\frac{1}{2}-B
                -\\left({A^2-B^2}\\right)\\ln\\left({\\kappa}\\right)
                }\\right)
                +\\frac{1}{n^2C^4}
                \\left({
                -\\left({\\frac{A^2}{2}+B^2}\\right)
                \\ln\\left({\\kappa}\\right)
                +\\frac{3ABE}{2}+\\frac{1}{2}-3B
                }\\right)

    .. math:: \\mu_2=
                \\ln\\left({\\frac{n}{s}}\\right)
                -\\frac{3}{4}
                +\\frac{s^2}{n^2}\\left({1-\\frac{s^2}{4n^2}}\\right)
                +A^2\\left({1-\\frac{s^2}{n^2}}\\right)
                \\left[{
                \\frac{1}{A^2-B^2}
                \\left({
                \\ln\\left({\\frac{s}{\\sqrt{\\kappa}}}\\right)
                -\\frac{BE}{2A}
                }\\right)
                +\\frac{1}{n^2C^2}
                \\left({
                \\ln\\left({\\sqrt{\\kappa}}\\right)
                -\\frac{BE}{2A}
                }\\right)
                }\\right]

    where :math:`A`, :math:`B`, :math:`C` and :math:`E` are:

    .. math:: A=\\sqrt{\\frac{\\kappa}{\\kappa-1}}

    .. math:: B=\\frac{s}{s-1}

    .. math:: C=\\frac{1}{s-1}

    .. math:: E=\\ln\\left({\\frac{A+1}{A-1}}\\right)

    and:

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: s = \\frac{r_s}{r_w}

    .. math:: \\kappa = \\frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius,
    :math:`r_s` is the smear zone radius, :math:`k_h` is the undisturbed
    horizontal permeability, :math:`k_s` is the smear zone horizontal
    permeability

    References
    ----------
    .. [1] Walker, Rohan, and Buddhima Indraratna. 2006. 'Vertical Drain
           Consolidation with Parabolic Distribution of Permeability in
           Smear Zone'. Journal of Geotechnical and Geoenvironmental
           Engineering 132 (7): 937-41.
           doi:10.1061/(ASCE)1090-0241(2006)132:7(937).

    """

    def mu_p(n, s, kap):
        """mu for parabolic smear"""
        # A, B, C correspond to the docstring definitions; the repeated
        # log((A+1)/(A-1)) factor is E in the docstring.
        A = sqrt((kap / (kap - 1)))
        B = s / (s - 1)
        C = 1 / (s - 1)

        term1 = (log(n / s) - 3 / 4 +
                 s ** 2 / n ** 2 * (1 - s ** 2 / (4 * n ** 2)))
        term2 = (1 - s ** 2 / n ** 2) * A ** 2
        term3 = 1 / (A ** 2 - B ** 2)
        term4 = (log(s / sqrt(kap))) - (B / (2 * A) * log((A + 1) / (A - 1)))
        term5 = 1 / (n ** 2 * C ** 2)
        term6 = (log(sqrt(kap))) - (B / (2 * A) * log((A + 1) / (A - 1)))
        # mu2 of the docstring
        mu2 = term1 + term2 * ((term3 * term4) + (term5 * term6))

        term7 = (A ** 2 / n ** 2 * (1 / (A ** 2 - B ** 2)) * (s ** 2 * log(s)
                 - 1 / 2 * (s ** 2 - 1)))
        term8 = -1 / (n ** 2 * C ** 2) * A ** 2 * (1 / (A ** 2 - B ** 2))
        term9 = (A ** 2 / 2 * log(kap) + B * A / 2 * log((A + 1) / (A - 1))
                 + 1 / 2 - B - (A ** 2 - B ** 2) * log(kap))

        term12 = A ** 2 / 2 * log(kap)
        term13 = (B * A / 2 * log((A + 1) / (A - 1)))
        term14 = 1 / 2 - B
        term15 = -(A ** 2 - B ** 2) * log(kap)

        term10 = A ** 2 / (n ** 4 * C ** 4)
        term11 = (-(A ** 2 / 2 + B ** 2) * (log(kap)) +
                  3 / 2 * A * B * log((A + 1) / (A - 1)) + 1 / 2 - 3 * B)
        # (A**2/n**2)*mu1 of the docstring
        mu1 = term7 + (term8 * term9) + (term10 * term11)

        mu = n ** 2 / (n ** 2 - 1) * (mu1 + mu2)
        return mu

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)

    if np.any(n<=1.0):
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(n)])))
    if np.any(s<1.0):
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap<=0.0):
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(s>n):
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(
                             ', '.join([str(v) for v in np.atleast_1d(s)]),
                             ', '.join([str(v) for v in np.atleast_1d(n)])))

    # remember whether any input was an array so the return type matches
    is_array = any([isinstance(v, np.ndarray) for v in [n, s, kap]])
    n = np.atleast_1d(n)
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)

    if len([v for v in [n, s] if v.shape==kap.shape])!=2:
        raise ValueError('n, s, and kap must have the same shape. You have '
                         'lengths for n, s, kap of {}, {}, {}.'.format(
                             len(n), len(s), len(kap)))

    mu = np.empty_like(n, dtype=float)

    # s==1 or kap==1 degenerates to the ideal (no-smear) solution
    ideal = np.isclose(s, 1) | np.isclose(kap, 1)

    mu[ideal] = mu_ideal(n[ideal])
    mu[~ideal] = mu_p(n[~ideal], s[~ideal], kap[~ideal])

    if is_array:
        return mu
    else:
        return mu[0]
def mu_piecewise_constant(s, kap, n=None, kap_m=None):
    """Smear zone parameter for a piecewise-constant permeability profile.

    mu parameter in equal strain radial consolidation equations, e.g.
    u = u0 * exp(-8*Th/mu).

    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of segment outer radii to drain radius (r_i/r_0).  The first
        value of s should be greater than 1, i.e. the first value should
        be s_1; s_0 = 1 at the drain-soil interface is implied.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability in
        each segment, kh/khi.
    n, kap_m : float, optional
        If `n` and `kap_m` are given they are appended to `s` and `kap`,
        which lets the smear zone be specified separately from the drain
        influence radius.  Default n=kap_m=None, i.e. the permeability
        profile is completely described by `s` and `kap`.  If n is given
        but kap_m is None then the last value in kap is reused.

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    The outermost s value is taken as n (the drain influence ratio).
    For each annular segment i the contribution is

        kap_i * [ (s_i^2/n^2) * ln(s_i/s_{i-1})
                  - (s_i^2 - s_{i-1}^2)/(2 n^2)
                  - (s_i^2 - s_{i-1}^2)^2/(4 n^4) ]
        + psi_i * (s_i^2 - s_{i-1}^2)/n^2

    where psi_i accumulates the resistance of all segments closer to the
    drain.  The total is scaled by n^2/(n^2 - 1).

    References
    ----------
    .. [1] Walker, Rohan. 2006. 'Analytical Solutions for Modeling Soft Soil
           Consolidation by Vertical Drains'. PhD Thesis, Wollongong, NSW,
           Australia: University of Wollongong.
           http://ro.uow.edu.au/theses/501
    .. [2] Walker, Rohan T. 2011. 'Vertical Drain Consolidation Analysis in
           One, Two and Three Dimensions'. Computers and
           Geotechnics 38 (8): 1069-77. doi:10.1016/j.compgeo.2011.07.006.
    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)

    if n is not None:
        # bolt the drain-influence boundary onto the end of the profile
        s = np.append(s, n)
        kap = np.append(kap, kap[-1] if kap_m is None else kap_m)

    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                             len(s), len(kap)))
    if np.any(s <= 1.0):
        raise ValueError('must have all s>=1. You have s = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(s))))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join(str(v) for v in
                                               np.atleast_1d(kap))))
    if np.any(np.diff(s) <= 0):
        raise ValueError('s must increase left to right you have s = '
                         '{}'.format(', '.join(str(v) for v in
                                               np.atleast_1d(s))))

    # outermost radius ratio defines n
    n = s[-1]
    # inner radius ratio of each segment (s_0 = 1 at the drain face)
    inner = np.ones_like(s, dtype=float)
    inner[1:] = s[:-1]

    total = 0.0
    for i in range(len(s)):
        # psi: accumulated resistance of all segments inside segment i,
        # normalised by this segment's kappa
        psi = sum(kap[j] * (log(s[j] / inner[j])
                            - 0.5 * (s[j] ** 2 / n ** 2
                                     - inner[j] ** 2 / n ** 2))
                  for j in range(i))
        psi /= kap[i]
        total += kap[i] * (
            s[i] ** 2 / n ** 2 * log(s[i] / inner[i])
            + (psi - 0.5) * (s[i] ** 2 / n ** 2 - inner[i] ** 2 / n ** 2)
            - 0.25 * (s[i] ** 2 - inner[i] ** 2) ** 2 / n ** 4)

    return total * n ** 2 / (n ** 2 - 1)
def mu_piecewise_linear(s, kap, n=None, kap_m=None):
    """Smear zone parameter for piecewise linear permeability distribution

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of radii to drain radius (r_i/r_0). The first value
        of s should be 1, i.e. at the drain soil interface.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability at each
        value of s.
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to `s`
        and `kap`. This allows the specification of a smear zone separate
        to the specification of the drain influence radius.
        Default n=kap_m=None, i.e. soil permeability is completely described
        by `s` and `kap`. If n is given but kap_m is None then the last
        kappa value in kap will be used.

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    With permeability in the ith segment defined by:

    .. math:: \\frac{k_i}{k_{ref}}= \\frac{1}{\\kappa_{i-1}}
                \\left({A_ir/r_w+B_i}\\right)

    .. math:: A_i = \\frac{\\kappa_{i-1}/\\kappa_i-1}{s_i-s_{i-1}}

    .. math:: B_i = \\frac{s_i-s_{i-1}\\kappa_{i-1}/\\kappa_i}{s_i-s_{i-1}}

    the smear zone :math:`\\mu` parameter is given by:

    .. math:: \\mu = \\frac{n^2}{n^2-1}
                \\left[{
                \\sum\\limits_{i=1}^{m}\\kappa_{i-1}\\theta_i
                + \\Psi_i
                \\left({
                \\frac{s_i^2-s_{i-1}^2}{n^2}
                }\\right)
                +\\mu_w
                }\\right]

    where,

    .. math:: \\theta_i = \\left\\{
                \\begin{array}{lr}
                \\frac{s_i^2}{n^2}\\ln
                \\left[{\\frac{s_i}{s_{i-1}}}\\right]
                -\\frac{s_i^2-s_{i-1}^2}{2n^2}
                -\\frac{\\left({s_i^2-s_{i-1}^2}\\right)^2}{4n^4}
                & \\textrm{for } \\frac{\\kappa_{i-1}}{\\kappa_i}=1 \\\\
                \\frac{\\left({s_i^2-s_{i-1}^2}\\right)}{3n^4}
                \\left({3n^2-s_{i-1}^2-2s_{i-1}s_i}\\right)
                & \\textrm{for }\\frac{\\kappa_{i-1}}{\\kappa_i}=
                \\frac{s_i}{s_{i-1}} \\\\
                \\begin{multline}
                \\frac{s_i}{B_i n^2}\\ln\\left[{
                \\frac{\\kappa_i s_i}{\\kappa_{i-1}s_{i-1}}}\\right]
                -\\frac{s_i-s_{i-1}}{A_in^2}
                \\left({1-\\frac{B_i^2}{A_i^2n^4}}\\right)
                \\\\-\\frac{\\left({s_i-s_{i-1}}\\right)^2}{3A_in^2}
                \\left({2s_i+s_{i-1}}\\right)
                \\\\+\\frac{B_i}{A_i^2 n^4}\\ln\\left[{
                \\frac{\\kappa_{i-1}}{\\kappa_i}}\\right]
                \\left({1-\\frac{B_i^2}{A_i^2n^2}}\\right)
                \\\\+\\frac{B_i}{2A_i^2 n^4}
                \\left({
                2s_i^2\\ln\\left[{
                \\frac{\\kappa_{i-1}}{\\kappa_i}}\\right]
                -s_i^2 + s_{i-1}^2
                }\\right)
                \\end{multline}
                & \\textrm{otherwise}
                \\end{array}\\right.

    .. math:: \\Psi_i = \\sum\\limits_{j=1}^{i-1}\\kappa_{j-1}\\psi_j

    .. math:: \\psi_i = \\left\\{
                \\begin{array}{lr}
                \\ln\\left[{\\frac{s_j}{s_{j-1}}}\\right]
                - \\frac{s_j^2- s_{j-1}^2}{2n^2}
                & \\textrm{for } \\frac{\\kappa_{j-1}}{\\kappa_j}=1 \\\\
                \\frac{\\left({s_j - s_{j-1}}\\right)
                \\left({n^2-s_js_{j-1}}\\right)}{s_jn^2}
                & \\textrm{for }\\frac{\\kappa_{j-1}}{\\kappa_j}=
                \\frac{s_j}{s_{j-1}} \\\\
                \\begin{multline}
                \\frac{1}{B_i}\\ln\\left[{\\frac{s_j}{s_{j-1}}}\\right]
                +\\ln\\left[{\\frac{\\kappa_{j-1}}{\\kappa_j}}\\right]
                \\left({\\frac{B_j}{A_j^2n^2}-\\frac{1}{B_j}}\\right)
                \\\\-\\frac{s_j-s_{j-1}}{A_j^2n^2}
                \\end{multline}
                & \\textrm{otherwise}
                \\end{array}\\right.

    and:

    .. math:: n = \\frac{r_m}{r_0}

    .. math:: s_i = \\frac{r_i}{r_0}

    .. math:: \\kappa_i = \\frac{k_h}{k_{ref}}

    :math:`r_0` is the drain radius, :math:`r_m` is the drain influence radius,
    :math:`r_i` is the radius of the ith radial point,
    :math:`k_{ref}` is a convenient reference permeability, usually
    the undisturbed horizontal permeability,
    :math:`k_{hi}` is the horizontal permeability at the ith radial point

    References
    ----------
    Derived by Rohan Walker in 2011 and 2014.
    Derivation steps are the same as for mu_piecewise_constant in appendix of
    [1]_ but permeability is linear in a segment as in [2]_.

    .. [1] Walker, Rohan. 2006. 'Analytical Solutions for Modeling Soft Soil
           Consolidation by Vertical Drains'. PhD Thesis, Wollongong, NSW,
           Australia: University of Wollongong.
           http://ro.uow.edu.au/theses/501
    .. [2] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.

    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)

    if not n is None:
        # append the drain-influence boundary (n, kap_m) to the profile
        s_temp = np.empty(len(s) + 1, dtype=float)
        s_temp[:-1] = s
        s_temp[-1] = n
        kap_temp = np.empty(len(kap) + 1, dtype=float)
        kap_temp[:-1] = kap
        if kap_m is None:
            kap_temp[-1] = kap[-1]
        else:
            kap_temp[-1] = kap_m
        s = s_temp
        kap = kap_temp

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)

    if len(s)!=len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                             len(s), len(kap)))
    if np.any(s < 1.0):
        raise ValueError('must have all s>=1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap<=0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(np.diff(s) < 0):
        raise ValueError('All s must satisfy s[i]>s[i-1]. You have s = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    if not np.isclose(s[0], 1):
        raise ValueError('First value of s should be 1. You '
                         'have s[0]={}'.format(s[0]))

    # the outermost radius ratio defines n
    n = s[-1]

    sumi = 0
    for i in range(1, len(s)):
        sumj = 0
        for j in range(1, i):
            # psi_j term: choose the applicable closed form for segment j
            # term1 = 0
            if np.isclose(s[j - 1], s[j]):
                # zero-width segment contributes nothing
                term1=0
            elif np.isclose(kap[j - 1], kap[j]):
                # constant permeability across the segment
                term1 = (log(s[j] / s[j - 1])
                         - (s[j] ** 2 - s[j - 1] ** 2) / 2 / n ** 2)
            elif np.isclose(kap[j-1] / kap[j], s[j] / s[j - 1]):
                # permeability proportional to radius
                term1 = (s[j] - s[j - 1]) * (n ** 2 -
                                             s[j - 1] * s[j]) / s[j] / n ** 2
            else:
                # general linear variation; A, B as in the docstring
                A = (kap[j-1] / kap[j] - 1) / (s[j] - s[j - 1])
                B = (s[j] - kap[j-1] / kap[j] * s[j - 1]) / (s[j] - s[j - 1])
                term1 = (1 / B * log(s[j] / s[j - 1])
                         + (B / A ** 2 / n ** 2 - 1 / B) * log(kap[j-1] / kap[j])
                         - (s[j] - s[j - 1]) / A / n ** 2)
            sumj += kap[j-1] * term1

        # theta_i term: choose the applicable closed form for segment i
        # term1 = 0
        if np.isclose(s[i - 1], s[i]):
            # zero-width segment contributes nothing
            term1=0
        elif np.isclose(kap[i - 1], kap[i]):
            # constant permeability across the segment
            term1 = (s[i] ** 2 / n ** 2 * log(s[i] / s[i - 1])
                     - (s[i] ** 2 - s[i - 1] ** 2) / 2 / n ** 2
                     - (s[i] ** 2 - s[i - 1] ** 2) ** 2 / 4 / n ** 4)
        elif np.isclose(kap[i-1] / kap[i], s[i] / s[i - 1]):
            # permeability proportional to radius
            term1 = ((s[i] - s[i - 1]) ** 2 / 3 / n ** 4 * (3 * n ** 2 -
                     s[i - 1] ** 2 - 2 * s[i - 1] * s[i]))
        else:
            # general linear variation; A, B as in the docstring
            A = (kap[i-1] / kap[i] - 1) / (s[i] - s[i - 1])
            B = (s[i] - kap[i-1] / kap[i] * s[i - 1]) / (s[i] - s[i - 1])
            term1 = (s[i] ** 2 / B / n ** 2 * log(kap[i] * s[i] /
                     kap[i-1] / s[i - 1])
                     - (s[i] - s[i - 1]) / A / n ** 2 *
                     (1 - B ** 2 / A ** 2 / n ** 2)
                     - (s[i] - s[i - 1]) ** 2 / 3 / A / n ** 4 *
                     (s[i - 1] + 2 * s[i])
                     + B / A ** 2 / n ** 2 * log(kap[i-1] / kap[i]) *
                     (1 - B ** 2 / A ** 2 / n ** 2)
                     + B / 2 / A ** 2 / n ** 4 * (s[i] ** 2 * (2 * log(kap[i-1] /
                     kap[i]) - 1) + s[i - 1] ** 2))

        sumi += kap[i-1] * term1 + sumj * (s[i] ** 2 - s[i - 1] ** 2) / n ** 2

    mu = sumi * n ** 2 / (n ** 2 - 1)
    return mu
def mu_well_resistance(kh, qw, n, H, z=None):
    """Additional smear zone parameter for well resistance
    Parameters
    ----------
    kh : float
        The normalising permeability used in calculating kappa for smear
        zone calcs.  Usually the undisturbed permeability, i.e. the kh in
        kappa = kh/ks.
    qw : float
        Drain discharge capacity, qw = kw * pi * rw**2.  The kw used must
        have the same units as kh.
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    H : float
        Length of drainage path.
    z : float, optional
        Evaluation depth.  Default z=None, in which case the well
        resistance factor is averaged over the drainage path.
    Returns
    -------
    mu : float
        mu parameter for well resistance.
    Notes
    -----
    At a depth :math:`z` the well resistance parameter is
    ``kh/qw * pi * z * (2H - z) * (1 - 1/n**2)``; the depth-averaged value
    is ``2*kh*H**2/(3*qw) * pi * (1 - 1/n**2)``.
    References
    ----------
    .. [1] Hansbo, S. 1981. 'Consolidation of Fine-Grained Soils by
           Prefabricated Drains'. In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    """
    n = np.asarray(n)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    # Geometry factor shared by both the averaged and at-depth forms.
    geom = 1.0 - 1.0 / n ** 2
    if z is None:
        # Depth-averaged well resistance factor.
        return 2.0 * kh * H ** 2 * np.pi * geom / (3.0 * qw)
    # Well resistance factor evaluated at depth z.
    return kh * np.pi * z * (2.0 * H - z) * geom / qw
def k_parabolic(n, s, kap, si):
    """Permeability distribution for smear zone with parabolic permeability
    Normalised with respect to undisturbed permeability. i.e. if you want the
    actual permeability then multiply by whatever you used to determine kap.
    Permeability is parabolic with value 1/kap at the drain soil interface
    i.e. at s=1 k=k0=1/kap. for si>s, permeability=1.
    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float of ndarray of float
        Normalised radial coordinate(s) at which to calc the permeability
        i.e. si=ri/rw
    Returns
    -------
    permeability : float or ndarray of float
        Normalised permeability (i.e. ki/kh) at the si values.
    Notes
    -----
    Parabolic distribution of permeability in smear zone is given by:
    .. math:: \\frac{k_h^\\prime\\left({r}\\right)}{k_h}=
                \\frac{\\kappa-1}{\\kappa}
                \\left({A-B+C\\frac{r}{r_w}}\\right)
                \\left({A+B-C\\frac{r}{r_w}}\\right)
    where :math:`A`, :math:`B`, :math:`C` are:
    .. math:: A=\\sqrt{\\frac{\\kappa}{\\kappa-1}}
    .. math:: B=\\frac{s}{s-1}
    .. math:: C=\\frac{1}{s-1}
    and:
    .. math:: n = \\frac{r_e}{r_w}
    .. math:: s = \\frac{r_s}{r_w}
    .. math:: \\kappa = \\frac{k_h}{k_s}
    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius,
    :math:`r_s` is the smear zone radius, :math:`k_h` is the undisturbed
    horizontal permeability, :math:`k_s` is the smear zone horizontal
    permeability
    References
    ----------
    .. [1] Walker, Rohan, and Buddhima Indraratna. 2006. 'Vertical Drain
           Consolidation with Parabolic Distribution of Permeability in
           Smear Zone'. Journal of Geotechnical and Geoenvironmental
           Engineering 132 (7): 937-41.
           doi:10.1061/(ASCE)1090-0241(2006)132:7(937).
    """
    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    if s > n:
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(s, n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: previous message stated the inverted inequality (1 >= si >= n).
        raise ValueError('si must satisfy 1 <= si <= n. You have '
                         'si = {}'.format(si))

    def parabolic_part(n, s, kap, si):
        """Parabolic smear zone part i.e. from si=1 to si=s"""
        A = sqrt((kap / (kap - 1)))
        B = s / (s - 1)
        C = 1 / (s - 1)
        k0 = 1 / kap
        return k0 * (kap - 1) * (A - B + C * si) * (A + B - C * si)

    # No smear zone (s=1) or no permeability reduction (kap=1): the soil is
    # undisturbed everywhere, so normalised permeability is 1.
    if np.isclose(s, 1) or np.isclose(kap, 1):
        return np.ones_like(si, dtype=float)
    smear = (si < s)
    permeability = np.ones_like(si, dtype=float)
    permeability[smear] = parabolic_part(n, s, kap, si[smear])
    return permeability
def k_linear(n, s, kap, si):
    """Permeability distribution for smear zone with linear permeability
    Normalised with respect to undisturbed permeability. i.e. if you want the
    actual permeability then multiply by whatever you used to determine kap.
    Permeability is linear with value 1/kap at the drain soil interface
    i.e. at s=1 k=k0=1/kap. for si>s, permeability=1.
    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float of ndarray of float
        Normalised radial coordinate(s) at which to calc the permeability
        i.e. si=ri/rw.
    Returns
    -------
    permeability : float or ndarray of float
        Normalised permeability (i.e. ki/kh) at the si values.
    Notes
    -----
    Linear distribution of permeability in smear zone is given by:
    .. math::
        \\frac{k_h^\\prime\\left({r}\\right)}{k_h}=
            \\left\\{\\begin{array}{lr}
                \\frac{1}{\\kappa}
                \\left({A\\frac{r}{r_w}+B}\\right)
                & s\\neq\\kappa \\\\
                \\frac{r}{\\kappa r_w}
                & s=\\kappa \\end{array}\\right.
    where :math:`A` and :math:`B` are:
    .. math:: A=\\frac{\\kappa-1}{s-1}
    .. math:: B=\\frac{s-\\kappa}{s-1}
    and:
    .. math:: n = \\frac{r_e}{r_w}
    .. math:: s = \\frac{r_s}{r_w}
    .. math:: \\kappa = \\frac{k_h}{k_s}
    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius,
    :math:`r_s` is the smear zone radius, :math:`k_h` is the undisturbed
    horizontal permeability, :math:`k_s` is the smear zone horizontal
    permeability
    References
    ----------
    .. [1] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.
    """
    # Validate before defining helpers so bad input fails fast.
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    if s > n:
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(s, n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: previous message stated the inverted inequality (1 >= si >= n).
        raise ValueError('si must satisfy 1 <= si <= n. You have '
                         'si = {}'.format(si))

    def s_neq_kap_part(n, s, kap, si):
        """Linear permeability in smear zone when s != kap"""
        A = (kap - 1) / (s - 1)
        B = (s - kap) / (s - 1)
        k0 = 1 / kap
        return k0 * (A * si + B)

    def s_eq_kap_part(n, s, kap, si):
        """Linear permeability in smear zone when s == kap (k = si/kap)"""
        k0 = 1 / kap
        return k0 * si

    # No smear zone (s=1) or no permeability reduction (kap=1): undisturbed.
    if np.isclose(s, 1) or np.isclose(kap, 1):
        return np.ones_like(si, dtype=float)
    smear = (si < s)
    permeability = np.ones_like(si, dtype=float)
    if np.isclose(s, kap):
        permeability[smear] = s_eq_kap_part(n, s, kap, si[smear])
    else:
        permeability[smear] = s_neq_kap_part(n, s, kap, si[smear])
    return permeability
def k_overlapping_linear(n, s, kap, si):
    """Permeability smear zone with overlapping linear permeability
    Normalised with respect to undisturbed permeability. i.e. if you want the
    actual permeability then multiply by whatever you used to determine kap.
    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)
    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float of ndarray of float
        Normalised radial coordinate(s) at which to calc the permeability
        i.e. si=ri/rw
    Returns
    -------
    permeability : float or ndarray of float
        Normalised permeability (i.e. ki/kh) at the si values.
    Notes
    -----
    When :math:`n>s` the permeability is no different from the linear case.
    When :math:`n\\leq (s+1)/2` then all the soil is disturbed
    and the permeability everywhere is equal to :math:`1/\\kappa`.
    When :math:`(s+1)/2<n<s` then the smear zones overlap.
    the permeability for :math:`r/r_w<s_X` is given by:
    .. math:: \\frac{k_h^\\prime\\left({r}\\right)}{k_h}=
                \\left\\{\\begin{array}{lr}
                \\frac{1}{\\kappa}
                \\left({A\\frac{r}{r_w}+B}\\right)
                & s\\neq\\kappa \\\\
                \\frac{r}{\\kappa r_w}
                & s=\\kappa \\end{array}\\right.
    In the overlapping part, :math:`r/r_w>s_X`, the permeability is given by:
    .. math:: k_h(r)=\\kappa_X/\\kappa
    where :math:`A` and :math:`B` are:
    .. math:: A=\\frac{\\kappa-1}{s-1}
    .. math:: B=\\frac{s-\\kappa}{s-1}
    .. math:: \\kappa_X= 1+\\frac{\\kappa-1}{s-1}\\left({s_X-1}\\right)
    .. math:: s_X = 2n-s
    and:
    .. math:: n = \\frac{r_e}{r_w}
    .. math:: s = \\frac{r_s}{r_w}
    .. math:: \\kappa = \\frac{k_h}{k_s}
    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius,
    :math:`r_s` is the smear zone radius, :math:`k_h` is the undisturbed
    horizontal permeability, :math:`k_s` is the smear zone horizontal
    permeability
    References
    ----------
    .. [1] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.
    """
    # NOTE: an unused inner helper `mu_intersecting` (dead code) was removed.
    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: previous message stated the inverted inequality (1 >= si >= n).
        raise ValueError('si must satisfy 1 <= si <= n. You have '
                         'si = {}'.format(si))
    if np.isclose(s, 1) or np.isclose(kap, 1):
        # No smear zone or no permeability reduction: undisturbed everywhere.
        permeability = np.ones_like(si, dtype=float)
    elif (2 * n - s <= 1):
        # Smear zones overlap completely: uniform permeability 1/kap.
        permeability = np.ones_like(si, dtype=float) / kap
    elif (n >= s):
        # Smear zones do not intersect: ordinary linear distribution.
        permeability = k_linear(n, s, kap, si)
    else:
        # Partially overlapping smear zones: linear up to sx, then constant
        # at the overlap value kapx/kap.
        sx = _sx(n, s)
        kapx = _kapx(n, s, kap)
        smear = (si < sx)
        permeability = np.ones_like(si, dtype=float)
        A = (kap - 1) / (s - 1)
        B = (s - kap) / (s - 1)
        permeability[smear] = 1 / kap * (A * si[smear] + B)
        permeability[~smear] = 1 / kap * kapx
    return permeability
def u_ideal(n, si, uavg=1, uw=0, muw=0):
    """Pore pressure at radius for ideal drain with no smear zone
    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    si : float of ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional = 1
        Average pore pressure in soil. default = 1. when `uw`=0 , then if
        uavg=1.
    uw : float, optional
        Pore pressure in drain, default = 0.
    muw : float, optional
        Well resistance mu parameter
    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si
    Notes
    -----
    The uavg is calculated from the eta method. It is not the uavg used when
    considering the vacuum as an equivalent surcharge. You would have to do
    other manipulations for that.
    Noteing that :math:`s_i=r_i/r_w`, the radial pore pressure distribution is given by:
    .. math:: u(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\ln\\left({\\frac{r}{r_w}}\\right)
                -\\frac{(r/r_w)^2-1}{2n^2}
                +\\mu_w
                }\\right]+u_w
    where:
    .. math:: n = \\frac{r_e}{r_w}
    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius.
    References
    ----------
    .. [1] Hansbo, S. 1981. 'Consolidation of Fine-Grained Soils by
           Prefabricated Drains'. In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    """
    n = np.asarray(n)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: previous message stated the inverted inequality (1 >= si >= n).
        raise ValueError('si must satisfy 1 <= si <= n. You have '
                         'si = {}'.format(si))
    mu = mu_ideal(n)
    # Scale factor mapping the geometric term to pore pressure.
    term1 = (uavg - uw) / (mu + muw)
    term2 = log(si) - 1 / (2 * n**2) * (si**2 - 1) + muw
    u = term1 * term2 + uw
    return u
def u_constant(n, s, kap, si, uavg=1, uw=0, muw=0):
    """Pore pressure at radius for constant permeability smear zone
    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float of ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional = 1
        Average pore pressure in soil. default = 1. when `uw`=0 , then if
        uavg=1.
    uw : float, optional
        Pore pressure in drain, default = 0.
    muw : float, optional
        Well resistance mu parameter.
    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si
    Notes
    -----
    The uavg is calculated from the eta method. It is not the uavg used when
    considering the vacuum as an equivalent surcharge. You would have to do
    other manipulations for that.
    Noteing that :math:`s_i=r_i/r_w`, the radial pore pressure distribution
    in the smear zone is given by:
    .. math:: u^\\prime(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\kappa\\left({
                \\ln\\left({s_i}\\right)
                -\\frac{1}{2n^2}\\left({s_i^2-1}\\right)
                }\\right)
                +\\mu_w
                }\\right]+u_w
    The pore pressure in the undisturbed zone is:
    .. math:: u(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\ln\\left({\\frac{s_i}{s}}\\right)
                -\\frac{1}{2n^2}\\left({s_i^2-s^2}\\right)
                +\\kappa\\left[{
                \\ln\\left({s}\\right)
                -\\frac{1}{2n^2}\\left({s^2-1}\\right)
                }\\right]
                +\\mu_w
                }\\right]+u_w
    where:
    .. math:: n = \\frac{r_e}{r_w}
    .. math:: s = \\frac{r_s}{r_w}
    .. math:: \\kappa = \\frac{k_h}{k_s}
    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius,
    :math:`r_s` is the smear zone radius, :math:`k_h` is the undisturbed
    horizontal permeability, :math:`k_s` is the smear zone horizontal
    permeability
    References
    ----------
    .. [1] Hansbo, S. 1981. 'Consolidation of Fine-Grained Soils by
           Prefabricated Drains'. In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    """
    def constant_part(n, s, kap, si):
        """u in smear zone with constant permeability i.e from si=1 to si=s"""
        term2 = log(si) - 1 / (2 * n ** 2) * (si ** 2 - 1)
        u = kap * term2
        return u

    def undisturbed_part(n, s, kap, si):
        """u outside of smear zone with constant permeability i.e from si=s to si=n"""
        term4 = (log(si / s) - 1 / (2 * n ** 2) * (si ** 2 - s ** 2)
                 + kap * (log(s) - 1 / (2 * n ** 2) * (s ** 2 - 1)))
        u = term4
        return u

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    if s > n:
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(s, n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: previous message stated the inverted inequality (1 >= si >= n).
        raise ValueError('si must satisfy 1 <= si <= n. You have '
                         'si = {}'.format(si))
    if np.isclose(s, 1) or np.isclose(kap, 1):
        # No smear zone or no permeability reduction: ideal drain result.
        return u_ideal(n, si, uavg, uw, muw)
    mu = mu_constant(n, s, kap)
    term1 = (uavg - uw) / (mu + muw)
    term2 = np.empty_like(si, dtype=float)
    smear = (si < s)
    term2[smear] = constant_part(n, s, kap, si[smear])
    term2[~smear] = undisturbed_part(n, s, kap, si[~smear])
    u = term1 * (term2 + muw) + uw
    return u
def u_linear(n, s, kap, si, uavg=1, uw=0, muw=0):
    """Pore pressure at radius for linear smear zone
    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float of ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional = 1
        Average pore pressure in soil. default = 1. when `uw`=0 , then if
        uavg=1.
    uw : float, optional
        Pore pressure in drain, default = 0.
    muw : float, optional
        Well resistance mu parameter.
    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si.
    Notes
    -----
    The uavg is calculated from the eta method. It is not the uavg used when
    considering the vacuum as an equivalent surcharge. You would have to do
    other manipulations for that.
    Noteing that :math:`s_i=r_i/r_w`, the radial pore pressure distribution
    in the smear zone is given by:
    .. math:: u^\\prime(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\kappa\\left({\\frac{1}{B}\\ln\\left({s_i}\\right)
                +\\left({\\frac{B}{A^2n^2}-\\frac{1}{B}}\\right)
                \\ln\\left({B+As_i}\\right)
                +\\frac{1-s_i}{An^2}
                }\\right)
                +\\mu_w
                }\\right]+u_w
    The pore pressure in the undisturbed zone is:
    .. math:: u(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\ln\\left({\\frac{s_i}{s}}\\right)
                -\\frac{s_i^2-s^2}{2n^2}
                +\\kappa
                \\left[{
                \\frac{1}{B}\\ln\\left({s}\\right)
                +\\left({\\frac{B}{A^2n^2}-\\frac{1}{B}}\\right)
                \\ln\\left({\\kappa}\\right)
                +\\frac{1-s}{An^2}
                }\\right]
                +\\mu_w
                }\\right]+u_w
    for the special case where :math:`s=\\kappa` the pore pressure
    in the undisturbed zone is:
    .. math:: u^\\prime(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                s\\frac{\\left({n^2-s_i}\\right)
                \\left({s_i-1}\\right)}{n^2s_i}
                +\\mu_w
                }\\right]+u_w
    The pore pressure in the undisturbed zone is:
    .. math:: u(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\ln\\left({\\frac{s_i}{s}}\\right)
                +s-1+\\frac{s}{n^2}
                -\\frac{s_i^2-s^2}{2n^2}
                +\\mu_w
                }\\right]+u_w
    where:
    .. math:: n = \\frac{r_e}{r_w}
    .. math:: s = \\frac{r_s}{r_w}
    .. math:: \\kappa = \\frac{k_h}{k_s}
    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius,
    :math:`r_s` is the smear zone radius, :math:`k_h` is the undisturbed
    horizontal permeability, :math:`k_s` is the smear zone horizontal
    permeability
    If :math:`s=1` or :math:`\\kappa=1` then u_ideal will be used.
    References
    ----------
    .. [1] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.
    """
    def linear_part(n, s, kap, si):
        """u in smear zone with linear permeability i.e from si=1 to si=s"""
        if np.isclose(s, kap):
            # special case s == kap
            term2 = -1 / si - 1 / n ** 2 * (si - 1) + 1
            return kap * term2
        else:
            A = (kap - 1) / (s - 1)
            B = (s - kap) / (s - 1)
            term2 = log(si) - log(A * si + B)
            term3 = A * si + B - 1 - B * log(A * si + B)
            u = (1 / B * term2 - 1 / (n ** 2 * A ** 2) * term3)
            return kap * u
        # NOTE: an unreachable trailing `return u` was removed here.

    def undisturbed_part(n, s, kap, si):
        """u outside of smear zone with linear permeability i.e from si=s to si=n"""
        if np.isclose(s, kap):
            # special case s == kap
            term2 = log(si / s) - 1 / (2 * n ** 2) * (si ** 2 - s ** 2)
            term3 = -1 / s - 1 / n ** 2 * (s - 1) + 1
            u = (term2 + kap * term3)
            return u
        else:
            A = (kap - 1) / (s - 1)
            B = (s - kap) / (s - 1)
            term2 = log(si / s) - 1 / (2 * n ** 2) * (si ** 2 - s ** 2)
            term3 = (1 / B * log(s / kap) - 1 / (n ** 2 * A ** 2) *
                     (kap - 1 - B * log(kap)))
            u = (term2 + kap * term3)
            return u

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    if s > n:
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(s, n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: previous message stated the inverted inequality (1 >= si >= n).
        raise ValueError('si must satisfy 1 <= si <= n. You have '
                         'si = {}'.format(si))
    if np.isclose(s, 1) or np.isclose(kap, 1):
        # No smear zone or no permeability reduction: ideal drain result.
        return u_ideal(n, si, uavg, uw, muw)
    mu = mu_linear(n, s, kap)
    term1 = (uavg - uw) / (mu + muw)
    term2 = np.empty_like(si, dtype=float)
    smear = (si < s)
    term2[smear] = linear_part(n, s, kap, si[smear])
    term2[~smear] = undisturbed_part(n, s, kap, si[~smear])
    u = term1 * (term2 + muw) + uw
    return u
def u_parabolic(n, s, kap, si, uavg=1, uw=0, muw=0):
    """Pore pressure at radius for parabolic smear zone
    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float of ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional = 1
        Average pore pressure in soil. default = 1. when `uw`=0 , then if
        uavg=1.
    uw : float, optional
        Pore pressure in drain, default = 0.
    muw : float, optional
        Well resistance mu parameter.
    Returns
    -------
    u : float of ndarray of float
        Pore pressure at specified si.
    Notes
    -----
    The uavg is calculated from the eta method. It is not the uavg used when
    considering the vacuum as an equivalent surcharge. You would have to do
    other manipulations for that.
    Noteing that :math:`s_i=r_i/r_w`, the radial pore pressure distribution
    in the smear zone is given by:
    .. math:: u^\\prime(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\frac{\\kappa}{\\kappa-1}\\left\\{{
                \\frac{1}{A^2-B^2}
                \\left({
                \\ln\\left({s_i}\\right)
                -\\frac{1}{2A}
                \\left[{
                \\left({A-B}\\right)F
                +\\left({A+B}\\right)G
                }\\right]
                }\\right)
                +\\frac{1}{2n^2AC}
                \\left[{
                \\left({A+B}\\right)F
                +\\left({A-B}\\right)G
                }\\right]
                }\\right\\}
                +\\mu_w
                }\\right]+u_w
    The pore pressure in the undisturbed zone is:
    .. math:: u(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\ln\\left({\\frac{s_i}{s}}\\right)
                -\\frac{s_i^2-s^2}{2n^2}
                +A^2
                \\left[{
                \\frac{1}{A^2-B^2}
                \\left({
                \\ln\\left({s}\\right)
                -\\frac{1}{2}\\left[{
                \\ln\\left({\\kappa}\\right)
                +\\frac{BE}{A}}\\right]
                }\\right)
                +\\frac{1}{2n^2C^2}
                \\left({\\ln\\left({\\kappa}\\right)
                -\\frac{BE}{A}}\\right)
                }\\right]
                +\\mu_w
                }\\right]+u_w
    where :math:`A`, :math:`B`, :math:`C`, :math:`E`, :math:`F`, and
    :math:`G` are:
    .. math:: A=\\sqrt{\\frac{\\kappa}{\\kappa-1}}
    .. math:: B=\\frac{s}{s-1}
    .. math:: C=\\frac{1}{s-1}
    .. math:: E=\\ln\\left({\\frac{A+1}{A-1}}\\right)
    .. math:: F(r/r_w) = \\ln\\left({\\frac{A+B-Cs_i}{A+1}}\\right)
    .. math:: G(r/r_w) = \\ln\\left({\\frac{A-B+Cs_i}{A-1}}\\right)
    and:
    .. math:: n = \\frac{r_e}{r_w}
    .. math:: s = \\frac{r_s}{r_w}
    .. math:: \\kappa = \\frac{k_h}{k_s}
    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius,
    :math:`r_s` is the smear zone radius, :math:`k_h` is the undisturbed
    horizontal permeability, :math:`k_s` is the smear zone horizontal
    permeability
    References
    ----------
    .. [1] Walker, Rohan, and Buddhima Indraratna. 2006. 'Vertical Drain
           Consolidation with Parabolic Distribution of Permeability in
           Smear Zone'. Journal of Geotechnical and Geoenvironmental
           Engineering 132 (7): 937-41.
           doi:10.1061/(ASCE)1090-0241(2006)132:7(937).
    """
    def parabolic_part(n, s, kap, si):
        """u in smear zone with parabolic permeability i.e from si=1 to si=s"""
        A = sqrt((kap / (kap - 1)))
        B = s / (s - 1)
        C = 1 / (s - 1)
        # NOTE: an unused local `E` was removed (only F and G appear below).
        F = log((A + B - C * si) / (A + 1))
        G = log((A - B + C * si) / (A - 1))
        term1 = kap / (kap - 1)
        term2 = 1 / (A ** 2 - B ** 2)
        term3 = log(si)
        term4 = -1 / (2 * A)
        term5 = (A - B) * F + (A + B) * G
        term6 = term2 * (term3 + term4 * term5)
        term7 = 1 / (2 * n ** 2 * A * C ** 2)
        term8 = (A + B) * F + (A - B) * G
        term9 = term7 * term8
        u = term1 * (term6 + term9)
        return u

    def undisturbed_part(n, s, kap, si):
        """u outside of smear zone with parabolic permeability i.e from si=s to si=n"""
        A = sqrt((kap / (kap - 1)))
        B = s / (s - 1)
        C = 1 / (s - 1)
        E = log((A + 1)/(A - 1))
        term1 = 1
        term2 = log(si / s) - 1 / (2 * n ** 2) * (si ** 2 - s ** 2)
        term3 = 1 / (A ** 2 - B ** 2)
        term4 = log(s) - 1 / 2 * (log(kap) + B / A * E)
        term5 = 1 / (2 * n ** 2 * C ** 2)
        term6 = (log(kap) - B / A * E)
        term7 = kap / (kap - 1) * (term3 * term4 + term5 * term6)
        u = term1 * (term2 + term7)
        return u

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    if s > n:
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(s, n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: previous message stated the inverted inequality (1 >= si >= n).
        raise ValueError('si must satisfy 1 <= si <= n. You have '
                         'si = {}'.format(si))
    if np.isclose(s, 1) or np.isclose(kap, 1):
        # No smear zone or no permeability reduction: ideal drain result.
        return u_ideal(n, si, uavg, uw, muw)
    mu = mu_parabolic(n, s, kap)
    term1 = (uavg - uw) / (mu + muw)
    term2 = np.empty_like(si, dtype=float)
    smear = (si < s)
    term2[smear] = parabolic_part(n, s, kap, si[smear])
    term2[~smear] = undisturbed_part(n, s, kap, si[~smear])
    u = term1 * (term2 + muw) + uw
    return u
def u_piecewise_constant(s, kap, si, uavg=1, uw=0, muw=0, n=None, kap_m=None):
    """Pore pressure at radius for piecewise constant permeability distribution
    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of segment outer radii to drain radius (r_i/r_0). The first value
        of s should be greater than 1, i.e. the first value should be s_1;
        s_0=1 at the drain soil interface is implied.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability in each
        segment kh/khi.
    si : float of ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional = 1
        Average pore pressure in soil. default = 1. when `uw`=0 , then if
        uavg=1.
    uw : float, optional
        Pore pressure in drain, default = 0.
    muw : float, optional
        Well resistance mu parameter
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to `s` and
        `kap`. This allows the specification of a smear zone separate to the
        specification of the drain influence radius.
        Default n=kap_m=None, i.e. soilpermeability is completely described
        by `s` and `kap`. If n is given but kap_m is None then the last
        kappa value in kap will be used.
    Returns
    -------
    u : float of ndarray of float
        Pore pressure at specified si.
    Notes
    -----
    The pore pressure in the ith segment is given by:
    .. math:: u_i(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\kappa_i\\left({\\ln\\left({\\frac{r}{r_{i-1}}}\\right)
                -\\frac{r^2/r_0^2-s_{i-1}^2}{2n^2}}\\right)
                +\\psi_i+\\mu_w
                }\\right]+u_w
    where,
    .. math:: \\psi_{i} = \\sum\\limits_{j=1}^{i-1}\\kappa_j
                \\left[{
                \\ln
                \\left({
                \\frac{s_j}{s_{j-1}}
                }\\right)
                -\\frac{s_j^2-s_{j-1}^2}{2n^2}
                }\\right]
    and:
    .. math:: n = \\frac{r_m}{r_0}
    .. math:: s_i = \\frac{r_i}{r_0}
    .. math:: \\kappa_i = \\frac{k_h}{k_{hi}}
    :math:`r_0` is the drain radius, :math:`r_m` is the drain influence radius,
    :math:`r_i` is the outer radius of the ith segment,
    :math:`k_h` is the undisturbed
    horizontal permeability in the ith segment,
    :math:`k_{hi}` is the horizontal
    permeability in the ith segment
    References
    ----------
    .. [1] Walker, Rohan. 2006. 'Analytical Solutions for Modeling Soft Soil
           Consolidation by Vertical Drains'. PhD Thesis, Wollongong, NSW,
           Australia: University of Wollongong. http://ro.uow.edu.au/theses/501
    .. [2] Walker, Rohan T. 2011. 'Vertical Drain Consolidation Analysis in
           One, Two and Three Dimensions'. Computers and
           Geotechnics 38 (8): 1069-77. doi:10.1016/j.compgeo.2011.07.006.
    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if n is not None:
        # Append the influence radius (and its kappa) as a final segment.
        s_temp = np.empty(len(s) + 1, dtype=float)
        s_temp[:-1] = s
        s_temp[-1] = n
        kap_temp = np.empty(len(kap) + 1, dtype=float)
        kap_temp[:-1] = kap
        if kap_m is None:
            kap_temp[-1] = kap[-1]
        else:
            kap_temp[-1] = kap_m
        s = s_temp
        kap = kap_temp
    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                             len(s), len(kap)))
    if np.any(s <= 1.0):
        # s_0=1 is implied, so every listed s must exceed 1.
        raise ValueError('all s must be greater than 1. You have s = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(np.diff(s) <= 0):
        raise ValueError('s must increase left to right you have s = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    n = s[-1]
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: previous message stated the inverted inequality.
        raise ValueError('si must satisfy 1 <= si <= s[-1]. You have '
                         'si = {}'.format(si))
    # s_ holds each segment's inner radius (s_[0]=1 at the drain interface).
    s_ = np.ones_like(s)
    s_[1:] = s[:-1]
    u = np.empty_like(si, dtype=float)
    # Index of the segment each si falls in.
    segment = np.searchsorted(s, si)
    mu = mu_piecewise_constant(s, kap)
    term1 = (uavg - uw) / (mu + muw)
    for ii, i in enumerate(segment):
        # psi term: accumulated contribution of all inner segments.
        sumj = 0
        for j in range(i):
            sumj += (kap[j] * (log(s[j] / s_[j])
                     - 0.5 * (s[j] ** 2 / n ** 2 - s_[j] ** 2 / n ** 2)))
        sumj = sumj / kap[i]
        u[ii] = kap[i] * (
            log(si[ii] / s_[i])
            - 0.5 * (si[ii] ** 2 / n ** 2 - s_[i] ** 2 / n ** 2)
            + sumj
        ) + muw
    u *= term1
    u += uw
    return u
def u_piecewise_linear(s, kap, si, uavg=1, uw=0, muw=0, n=None, kap_m=None):
    """Pore pressure at radius for piecewise linear permeability distribution
    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of radii to drain radius (r_i/r_0). The first value
        of s should be 1, i.e. at the drain soil interface.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability at each
        value of s.
    si : float of ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional = 1
        Average pore pressure in soil. default = 1. when `uw`=0 , then if
        uavg=1.
    uw : float, optional
        Pore pressure in drain, default = 0.
    muw : float, optional
        Well resistance mu parameter.
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to `s` and
        `kap`. This allows the specification of a smear zone separate to the
        specification of the drain influence radius.
        Default n=kap_m=None, i.e. soilpermeability is completely described
        by `s` and `kap`. If n is given but kap_m is None then the last
        kappa value in kap will be used.
    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si.
    Notes
    -----
    With permeability in the ith segment defined by:
    .. math:: \\frac{k_i}{k_{ref}}= \\frac{1}{\\kappa_{i-1}}
                \\left({A_ir/r_w+B_i}\\right)
    .. math:: A_i = \\frac{\\kappa_{i-1}/\\kappa_i-1}{s_i-s_{i-1}}
    .. math:: B_i = \\frac{s_i-s_{i-1}\\kappa_{i-1}/\\kappa_i}{s_i-s_{i-1}}
    The pore pressure in the ith segment is given by:
    .. math:: u_i(s) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\sum\\limits_{i=1}^{m}\\kappa_{i-1}\\phi_i
                + \\Psi_i
                +\\mu_w
                }\\right]+u_w
    where,
    .. math:: \\phi_i = \\left\\{
                \\begin{array}{lr}
                \\ln\\left[{\\frac{s}{s_{i-1}}}\\right]
                - \\frac{s^2- s_{i-1}^2}{2n^2}
                & \\textrm{for } \\frac{\\kappa_{i-1}}{\\kappa_i}=1 \\\\
                \\frac{\\left({s - s_{i-1}}\\right)
                \\left({n^2-ss_{i-1}}\\right)}{sn^2}
                & \\textrm{for }\\frac{\\kappa_{i-1}}{\\kappa_i}=
                \\frac{s_i}{s_{i-1}} \\\\
                \\begin{multline}
                \\frac{1}{B_i}\\ln\\left[{\\frac{s}{s_{i-1}}}\\right]
                +\\ln\\left[{A_is+B_i}\\right]
                \\left({\\frac{B_i}{A_i^2n^2}-\\frac{1}{B_i}}\\right)
                \\\\-\\frac{s-s_{i-1}}{A_i^2n^2}
                \\end{multline}
                & \\textrm{otherwise}
                \\end{array}\\right.
    .. math:: \\Psi_i = \\sum\\limits_{j=1}^{i-1}\\kappa_{j-1}\\psi_j
    .. math:: \\psi_i = \\left\\{
                \\begin{array}{lr}
                \\ln\\left[{\\frac{s_j}{s_{j-1}}}\\right]
                - \\frac{s_j^2- s_{j-1}^2}{2n^2}
                & \\textrm{for } \\frac{\\kappa_{j-1}}{\\kappa_j}=1 \\\\
                \\frac{\\left({s_j - s_{j-1}}\\right)
                \\left({n^2-s_js_{j-1}}\\right)}{s_jn^2}
                & \\textrm{for }\\frac{\\kappa_{j-1}}{\\kappa_j}=
                \\frac{s_j}{s_{j-1}} \\\\
                \\begin{multline}
                \\frac{1}{B_i}\\ln\\left[{\\frac{s_j}{s_{j-1}}}\\right]
                +\\ln\\left[{\\frac{\\kappa_{j-1}}{\\kappa_j}}\\right]
                \\left({\\frac{B_j}{A_j^2n^2}-\\frac{1}{B_j}}\\right)
                \\\\-\\frac{s_j-s_{j-1}}{A_j^2n^2}
                \\end{multline}
                & \\textrm{otherwise}
                \\end{array}\\right.
    and:
    .. math:: n = \\frac{r_m}{r_0}
    .. math:: s_i = \\frac{r_i}{r_0}
    .. math:: \\kappa_i = \\frac{k_h}{k_{ref}}
    :math:`r_0` is the drain radius, :math:`r_m` is the drain influence radius,
    :math:`r_i` is the radius of the ith radial point,
    :math:`k_{ref}` is a convienient refernce permeability, usually
    the undisturbed
    horizontal permeability,
    :math:`k_{hi}` is the horizontal
    permeability at the ith radial point
    References
    ----------
    Derived by Rohan Walker in 2011 and 2014.
    Derivation steps are the same as for mu_piecewise_constant in appendix of
    [1]_ but permeability is linear in a segemetn as in [2]_.
    .. [1] Walker, Rohan. 2006. 'Analytical Solutions for Modeling Soft Soil
           Consolidation by Vertical Drains'. PhD Thesis, Wollongong, NSW,
           Australia: University of Wollongong. http://ro.uow.edu.au/theses/501
    .. [2] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.
    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if n is not None:
        # Append the influence radius (and its kappa) as a final point.
        s_temp = np.empty(len(s) + 1, dtype=float)
        s_temp[:-1] = s
        s_temp[-1] = n
        kap_temp = np.empty(len(kap) + 1, dtype=float)
        kap_temp[:-1] = kap
        if kap_m is None:
            kap_temp[-1] = kap[-1]
        else:
            kap_temp[-1] = kap_m
        s = s_temp
        kap = kap_temp
    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                             len(s), len(kap)))
    if np.any(s < 1.0):
        raise ValueError('must have all s>=1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(np.diff(s) < 0):
        raise ValueError('s must increase left to right. you have s = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    n = s[-1]
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: previous message stated the inverted inequality.
        raise ValueError('si must satisfy 1 <= si <= s[-1]. You have '
                         'si = {}'.format(si))
    # s_ holds the inner radius of each segment (s_[0]=1).
    s_ = np.ones_like(s)
    s_[1:] = s[:-1]
    u = np.empty_like(si, dtype=float)
    segment = np.searchsorted(s, si)
    segment[segment == 0] = 1  # put si=1 in first segment
    mu = mu_piecewise_linear(s, kap)
    term1 = (uavg - uw) / (mu + muw)
    for ii, i in enumerate(segment):
        # phi: contribution of the segment containing si[ii].
        if np.isclose(kap[i-1] / kap[i], 1.0):
            phi = log(si[ii] / s[i-1]) - (si[ii]**2 - s[i-1]**2) / (2 * n**2)
        elif np.isclose(kap[i-1] / kap[i], s[i] / s[i-1]):
            phi = (si[ii] - s[i-1]) * (n**2 - s[i-1] * si[ii]) / (si[ii] * n**2)
        else:
            A = (kap[i-1] / kap[i] - 1) / (s[i] - s[i-1])
            B = (s[i] - s[i-1] * kap[i-1] / kap[i]) / (s[i] - s[i-1])
            phi = (1/B * log(si[ii] / s[i-1])
                   + (B/A**2/n**2 - 1/B) * log(A * si[ii] + B)
                   - (si[ii] - s[i-1])/A/n**2)
        # psi: accumulated contribution of all inner segments.
        psi = 0
        for j in range(1, i):
            if np.isclose(s[j - 1], s[j]):
                # zero-width segment contributes nothing
                pass
            elif np.isclose(kap[j-1] / kap[j], 1.0):
                psi += kap[j-1] * (log(s[j] / s[j-1]) - (s[j]**2 - s[j-1]**2) / (2 * n**2))
            elif np.isclose(kap[j-1] / kap[j], s[j] / s[j-1]):
                psi += kap[j-1] * ((s[j] - s[j-1]) * (n**2 - s[j-1] * s[j]) / (s[j] * n**2))
            else:
                A = (kap[j-1] / kap[j] - 1) / (s[j] - s[j-1])
                B = (s[j] - s[j-1] * kap[j-1] / kap[j]) / (s[j] - s[j-1])
                psi += kap[j-1] * ((1/B * log(s[j] / s[j-1])
                                    + (B/A**2/n**2 - 1/B) * log(A * s[j] + B)
                                    - (s[j] - s[j-1])/A/n**2))
        u[ii] = kap[i-1] * phi + psi + muw
    u *= term1
    u += uw
    return u
def re_from_drain_spacing(sp, pattern = 'Triangle'):
    """Calculate drain influence radius from drain spacing.

    Parameters
    ----------
    sp : float
        Distance between drain centers.
    pattern : ['Triangle', 'Square'], optional
        Drain installation pattern.  Only the first letter is significant
        (case insensitive).  Default pattern='Triangle'.

    Returns
    -------
    re : float
        Drain influence radius.

    Notes
    -----
    The influence radius, :math:`r_e`, is given by:

    .. math:: r_e =
                \\left\\{\\begin{array}{lr}
                S_p \\frac{1}{\\sqrt{\\pi}}=S_p\\times 0.564189583
                    & \\textrm{square pattern}\\\\
                S_p \\sqrt{\\frac{\\sqrt{3}}{2\\pi}}=S_p\\times 0.525037567
                    & \\textrm{triangular pattern}
                \\end{array}\\right.

    References
    ----------
    .. [1] Walker, Rohan T. 2011. 'Vertical Drain Consolidation Analysis in
           One, Two and Three Dimensions'. Computers and
           Geotechnics 38 (8): 1069-77. doi:10.1016/j.compgeo.2011.07.006.

    """
    if np.any(np.atleast_1d(sp) <= 0):
        raise ValueError('sp must be greater than zero. '
                         'You have sp={}'.format(sp))
    # Spacing-to-influence-radius factors keyed on the first letter of the
    # pattern name:  'T' -> (3**0.5 / 2 / pi)**0.5,  'S' -> 1 / pi**0.5.
    factors = {'T': 0.525037567904332,
               'S': 0.5641895835477563}
    first = pattern[0].upper()
    if first not in factors:
        raise ValueError("pattern must begin with 'T' for triangular "
                         " or 'S' for square. You have pattern="
                         "{}".format(pattern))
    return factors[first] * sp
def drain_eta(re, mu_function, *args, **kwargs):
    """Calculate the vertical drain eta parameter for a specific smear zone.

    eta = 2 / re**2 / (mu + muw)

    eta appears in radial consolidation equations of the form
    u = u0 * exp(-eta * kh / gamw * t).

    Parameters
    ----------
    re : float
        Drain influence radius.
    mu_function : obj or string
        The mu function to use, e.g. mu_ideal, mu_constant, mu_linear,
        mu_overlapping_linear, mu_parabolic, mu_piecewise_constant,
        mu_piecewise_linear.  Either the function object itself or the
        name of the function, e.g. 'mu_ideal'.
    muw : float, optional
        Well resistance mu term.  Default muw=0.
    *args, **kwargs : various
        The arguments to pass to the mu_function.

    Returns
    -------
    eta : float
        Value of eta parameter.

    Examples
    --------
    >>> drain_eta(1.5, mu_ideal, 10)
    0.563178340433...
    >>> drain_eta(1.5, 'mu_ideal', 10)
    0.5631783404334...
    >>> drain_eta(1.5, mu_constant, 5, 1.5, 1.6, muw=1)
    0.4115837724144...

    """
    # Accept either a function object or the name of a module-level
    # function; names not found in this module fall through unchanged.
    mu_fn = globals().get(mu_function, mu_function)
    # Strip muw from kwargs so it is not forwarded to mu_fn.
    muw = kwargs.pop('muw', 0)
    total_mu = mu_fn(*args, **kwargs) + muw
    return 2 / re**2 / total_mu
def back_calc_drain_spacing_from_eta(eta, pattern, mu_function, rw, s, kap, muw=0):
    """Back calculate the required drain spacing to achieve a given eta
    eta = 2 / re**2 / (mu + muw)
    eta is used in radial consolidation equations u= u0 * exp(-eta*kh/gamw*t)
    Parameters
    ----------
    eta : float
        eta value.
    pattern : ['Triangle', 'Square']
        Drain installation pattern.
    mu_function : obj
        The mu_funtion to use. e.g. mu_ideal, mu_constant, mu_linear,
        mu_overlapping_linear, mu_parabolic, mu_piecewise_constant,
        mu_piecewise_linear.
    rw : float
        Drain/well radius.
    s : float or 1d array_like of float
        Ratio of smear zone radius to drain radius (rs/rw). s can only be
        a 1d array is using a mu_piecewise function
    kap : float or 1d array_like of float
        Ratio of undisturbed horizontal permeability to permeability at
        in smear zone (kh / ks) (often at the drain-soil interface). Be
        careful when defining s and kap for mu_piecewise_constant, and
        mu_piecewise_linear because the last value of kap will be used at
        the influence drain periphery. In general the last value of kap
        should be one, representing the start of the undisturbed zone.
    muw : float, optional
        Well resistance mu term, default=0.
    Returns
    -------
    sp : float
        Drain spacing to get the required eta value
    re : float
        Drain influence radius
    n : float
        Ratio of drain influence radius to drain radius, re/rw
    Notes
    -----
    When using mu_piecewise_linear or mu_piecewise_constant only define s and
    kap up to the start of the undisturbed zone. re will be varied.
    For anyting other than mu_overlapping_linear do not trust any returned
    spacing that gives an n value less than the extent of the smear zone.
    """
    def calc_eta(sp, eta, rw, s, kap, mu_function, pattern, muw=0):
        """eta from a given spacing value
        used in root finding
        """
        re = re_from_drain_spacing(sp, pattern)
        n = re/rw
        # Guard: unless the model is mu_overlapping_linear (which handles
        # n < s internally), the trial spacing must keep the influence
        # radius outside the smear zone (n >= max(s)).
        if mu_function != mu_ideal:
            if n < np.max(s):
                if mu_function != mu_overlapping_linear:
                    raise ValueError('In determining required drain '
                            'spacing, n has fallen '
                            'below s. s={}, n={}'.format(np.max(s), n))
        # The piecewise mu functions take (s, kap, n=...) whereas the other
        # mu functions take (n, s, kap).
        if mu_function in [mu_piecewise_constant, mu_piecewise_linear]:
            eta_ = drain_eta(re, mu_function, s, kap, n = n, muw = muw)
        else:
            eta_ = drain_eta(re, mu_function, n, s, kap, muw=muw)
        # Residual whose root gives the spacing that produces `eta`.
        return eta_ - eta
    from scipy.optimize import fsolve
    # Only the piecewise mu functions accept vector-valued s and kap.
    if not mu_function in [mu_piecewise_constant, mu_piecewise_linear]:
        if len(np.atleast_1d(s))>1:
            raise ValueError('for mu_function={}, you cannot have multiple '
                    'values for s. s={}'.format(mu_function.__name__, s))
        if len(np.atleast_1d(kap))>1:
            raise ValueError('for mu_function={}, you cannot have multiple '
                    'values for kap. kap={}'.format(mu_function.__name__, kap))
    x0 = rw * np.max(s) / 0.5 * 2 # this ensures guess is beyond smear zone
    # Evaluate once at the initial guess; the return value is discarded,
    # but the call raises early (see guard in calc_eta) if the guess is
    # inside the smear zone.
    calc_eta(x0, eta, rw, s, kap, mu_function, pattern, muw )
    sp = fsolve(calc_eta, x0,
                args=(eta, rw, s, kap, mu_function, pattern, muw))
    re = re_from_drain_spacing(sp[0], pattern)
    n = re/rw
    # Final sanity check on the converged spacing.
    if mu_function != mu_ideal:
        if n < np.max(s):
            if mu_function != mu_overlapping_linear:
                raise ValueError('calculated spacing results in n<s. s={}, n={}'.format(np.max(s), n))
    return sp[0], re, n
def _g(r_rw, re_rw, nflow=1.0001, nterms=20):
"""Non-darcian equal strain radial consolidation term
Parameters
----------
r_rw : float
Ratio of radial coordinate to drain radius (r/rw).
re_rw : float
Ratio of drain influence radius to drain readius (re/rw).
You will often see this ratio expressed as re/re=n. However, this is
confusing with the non-darcian flow exponent.
nflow : float, optional
Non-Darcian flow exponent. Default nflow=1.0001 i.e. darcian flow.
Using nflow=1 will result in an error.
nterms : int, optional
Number of summation terms. Default nterms=20.
Returns
-------
g : float
Non-darcian equal strain radial consolidation term.
Notes
-----
The 'g' function arises in the derivation of equal strain radial
consolidation equations under non-Darcian flow.
We only concern ourselves with the exponential part of Hansbo's
Non-darcian flow relationship:
.. math::
v=k^{\\ast}i^{n}
where,
:math:`k^{\\ast}` is a peremability, :math:`i` is hydraulic gradient and
:math:`n` is the flow exponent.
The expression :math:`g\\left({y}\\right)` is given below. :math:`y` is
the ratio of radial coordinate :math:`r` to drain radius :math:`r_w`,
:math:`y=r/r_w`. :math:`N` is the ratio of influence radius :math:`r_e`
to drain radius :math:`r_w`, :math:`N=r_e/r_w`.
.. math::
g\\left({y}\\right)=
ny^{1-1/n}\sum\limits_{j=0}^\\infty
\\frac{\\left\\{{-1/n}\\right\\}_j}
{j!\\left({\\left({2j+1}\\right)n-1}\\right)}
\\left({\\frac{y}{N}}\\right)^{2j}
:math:`\\left\\{x\\right\\}_m` is the Pochhammer symbol or rising
factorial given by:
.. math::
\\left\\{x\\right\\}_m = x
\\left({x+1}\\right)
\\left({x+2}\\right)
\\dots
\\left({x+m-1}\\right)
.. math::
\\left\\{x\\right\\}_0=1
Alterantely a recurrance relatoin can be formed:
.. math::
g\\left({y}\\right)=
\sum\limits_{j=0}^{\\infty} a_j
where,
.. math::
a_0=\\frac{n}{n-1}y^{1-1/n}
.. math::
a_j = a_{j-1}
\\frac{\\left({jn-n-1}\\right)\\left({2jn-n-1}\\right)}
{nj\\left({2jn+n-1}\\right)}
\\left({\\frac{y}{N}}\\right)^{2}
Examples
--------
>>> _g(10.0, 50.0, nflow=1.2)
8.7841...
>>> _g(10.0, 20.0, nflow=1.2)
8.664...
>>> _g(2, 50.0, nflow=1.01)
101.694...
>>> _g(5, 5, nflow=1.01)
102.120...
>>> _g(10.0, np.array([50.0,20]), nflow=1.2)
array([8.7841..., 8.664...])
See Also
--------
_gbar : multiply _g by y and integrate w.r.t y
"""
r_rw = np.asarray(r_rw)
re_rw = np.asarray(re_rw)
nflow = np.asarray(nflow)
if np.any(r_rw < 1):
raise ValueError('r_rw must be greater or equal to 1. '
'You have r_rw = {}'.format(
', '.join([str(v) for v in np.atleast_1d(r_rw)])))
if np.any(re_rw <= 1):
raise ValueError('re_rw must be greater than 1. '
'You have re_rw = {}'.format(
', '.join([str(v) for v in np.atleast_1d(re_rw)])))
if np.any(nflow <= 1):
raise ValueError('nflow must be greater than 1. '
'You have nflow = {}'.format(
', '.join([str(v) for v in np.atleast_1d(nflow)])))
if np.any([len(np.asarray(v).shape)>0 for v in
[r_rw, re_rw, nflow, nterms]]):
#array inputs, use series loop
g = 0
term1 = nflow * r_rw**(1-1.0/nflow)
for j in range(nterms):
term2 = special.poch(-1.0 / nflow, j)
term3 = np.math.factorial(j)
term4 = (2 * j + 1) * nflow - 1
term5 = (r_rw / re_rw)**(2 * j)
g += term2 / term3 / term4 * term5
g *= term1
return g
else:
#scalar inputs, use recursion relationship
a = np.zeros(nterms)
a[0] = nflow / (nflow - 1.0) * r_rw**(1.0 - 1.0 / nflow)
j = np.arange(1, nterms)
a[1:] = (r_rw / re_rw)**2
a[1:] *= (j * nflow - nflow - 1)
a[1:] *= (2* j * nflow - nflow - 1)
a[1:] /= nflow * j * (2 * j * nflow + nflow - 1)
np.cumprod(a, out=a)
g=np.sum(a)
return g
def _gbar(r_rw, re_rw, nflow=1.0001, nterms=20):
"""Non-darcian equal strain radial consolidation term
_g expression multiplied by y and integrated w.r.t. y
Parameters
----------
r_rw : float
Ratio of radial coordinate to drain radius (r/rw).
re_rw : float
Ratio of drain influence radius to drain readius (re/rw).
You will often see this ratio expressed as re/re=n. However, this is
confusing with the non-darcian flow exponent.
nflow : int, optional
Non-Darcian flow exponent. Default nflow=1.0001 i.e. darcian flow.
Using nflow=1 will result in an error.
nterms : float, optional
Number of summation terms. Default nterms=20.
Returns
-------
gbar : float
Non-darcian equal strain radial consolidation term.
Notes
-----
The 'gbar' (bar stands for overbar) function arises in the derivation
of equal strain radial consolidation equations under non-Darcian flow.
We only concern ourselves with the exponential part of Hansbo's
Non-darcian flow relationship:
.. math::
v=k^{\\ast}i^{n}
where,
:math:`k^{\\ast}` is a peremability, :math:`i` is hydraulic gradient and
:math:`n` is the flow exponent.
The expression :math:`g\\left({y}\\right)` is given below. :math:`y` is
the ratio of radial coordinate :math:`r` to drain radius :math:`r_w`,
:math:`y=r/r_w`. :math:`N` is the ratio of influence radius :math:`r_e`
to drain radius :math:`r_w`, :math:`N=r_e/r_w`.
.. math::
\\overline{g}\\left({y}\\right)=
n^2y^{3-1/n}\sum\limits_{j=0}^\\infty
\\frac{\\left\\{{-1/n}\\right\\}_j}
{j!\\left({\\left({2j+1}\\right)n-1}\\right)
\\left({\\left({2j+3}\\right)n-1}\\right)}
\\left({\\frac{y}{N}}\\right)^{2j}
:math:`\\left\\{x\\right\\}_m` is the Pochhammer symbol or rising
factorial given by:
.. math::
\\left\\{x\\right\\}_m = x
\\left({x+1}\\right)
\\left({x+2}\\right)
\\dots
\\left({x+m-1}\\right)
.. math::
\\left\\{x\\right\\}_0=1
Alterantely a recurrance relatoin can be formed:
.. math::
\\overline{g}\\left({y}\\right)=
\sum\limits_{j=0}^{\\infty} a_j
where,
.. math::
a_0=\\frac{n^2}{\\left({n-1}\\right)\\left({3n-1}\\right)}y^{3-1/n}
.. math::
a_j = a_{j-1}
\\frac{\\left({jn-n-1}\\right)\\left({2jn-n-1}\\right)}
{nj\\left({2jn+3n-1}\\right)}
\\left({\\frac{y}{N}}\\right)^{2}
Examples
--------
>>> _gbar(10.0, 50.0, nflow=1.2)
405.924...
>>> _gbar(10.0, 20.0, nflow=1.2)
403.0541...
>>> _gbar(2, 50.0, nflow=1.01)
202.3883...
>>> _gbar(5, 5, nflow=1.01)
1273.3329...
>>> _gbar(10.0, np.array([50.0,20]), nflow=1.2)
array([405.924..., 403.0541...])
See Also
--------
_g : earlier step in derivation of `_gbar`.
"""
r_rw = np.asarray(r_rw)
re_rw = np.asarray(re_rw)
nflow = np.asarray(nflow)
if np.any(r_rw < 1):
raise ValueError('r_rw must be greater or equal to 1. '
'You have r_rw = {}'.format(
', '.join([str(v) for v in np.atleast_1d(r_rw)])))
if np.any(re_rw <= 1):
raise ValueError('re_rw must be greater than 1. '
'You have re_rw = {}'.format(
', '.join([str(v) for v in np.atleast_1d(re_rw)])))
if np.any(nflow <= 1):
raise ValueError('nflow must be greater than 1. '
'You have nflow = {}'.format(
', '.join([str(v) for v in np.atleast_1d(nflow)])))
if np.any([len(np.asarray(v).shape)>0 for v in
[r_rw, re_rw, nflow, nterms]]):
#array inputs, use series loop
gbar = 0
term1 = nflow**2 * r_rw**(3 - 1.0/nflow)
for j in range(nterms):
term2 = special.poch(-1.0 / nflow, j)
term3 = np.math.factorial(j)
term4 = (2 * j + 1) * nflow - 1
term4a = (2 * j + 3) * nflow - 1
term5 = (r_rw/re_rw)**(2*j)
gbar += term2/term3/term4/term4a*term5
gbar *= term1
return gbar
else:
a = np.zeros(nterms)
a[0] = nflow**2 / (nflow - 1.0)/(3 * nflow - 1.0) * r_rw**(3.0 - 1.0 / nflow)
j = np.arange(1, nterms)
a[1:] = (r_rw / re_rw)**2
a[1:] *= (j * nflow - nflow - 1)
a[1:] *= (2* j * nflow - nflow - 1)
a[1:] /= nflow * j * (2 * j * nflow + 3 * nflow - 1)
np.cumprod(a, out=a)
gbar=np.sum(a)
return gbar
def non_darcy_beta_ideal(n, nflow=1.0001, nterms=20, *args):
    """Non-darcian flow smear zone permeability/geometry parameter for
    ideal drain (no smear).

    The beta parameter appears in equal strain radial consolidation
    equations with non-Darcian flow.

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    nflow : float, optional
        Non-darcian flow exponent. Default nflow=1.0001.
    nterms : int, optional
        Number of terms to use in series. Default nterms=20.
    args : anything
        `args` does not contribute to any calculations; it exists so this
        function has a call signature compatible with the other
        non_darcy_beta smear zone formulations (which take s and kappa).

    Returns
    -------
    beta : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    .. math::
        \\beta = \\frac{1}{N^2-1}
                 \\left({
                 2\\overline{g}\\left({N}\\right)
                 -2\\overline{g}\\left({1}\\right)
                 -g\\left({1}\\right) \\left({N^2-1}\\right)
                 }\\right)

    :math:`g\\left({y}\\right)` and :math:`\\overline{g}\\left({y}\\right)`
    are described in the `_g` and `_gbar` functions respectively.

    .. math:: n = \\frac{r_e}{r_w}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius.

    Examples
    --------
    >>> non_darcy_beta_ideal(20, 1.000001, nterms=20)
    2.2538...
    >>> non_darcy_beta_ideal(np.array([20, 10]), 1.000001, nterms=20)
    array([2.253..., 1.578...])
    >>> non_darcy_beta_ideal(15, 1.3)
    2.618...
    >>> non_darcy_beta_ideal(np.array([20, 15]), np.array([1.000001,1.3]), nterms=20)
    array([2.253..., 2.618...])

    See Also
    --------
    _g : used in this function.
    _gbar : used in this function.

    References
    ----------
    .. [1] Hansbo, S. 1981. "Consolidation of Fine-Grained Soils by
           Prefabricated Drains". In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    .. [2] Walker, R., B. Indraratna, and C. Rujikiatkamjorn.
           "Vertical Drain Consolidation with Non-Darcian Flow and
           Void-Ratio-Dependent Compressibility and Permeability."
           Geotechnique 62, no. 11 (November 1, 2012): 985-97.
           doi:10.1680/geot.10.P.084.

    """
    n = np.asarray(n)
    nflow = np.asarray(nflow)
    if np.any(n <= 1):
        raise ValueError('n must be greater than 1. '
                         'You have n = {}'.format(
                            ', '.join([str(v) for v in np.atleast_1d(n)])))
    if np.any(nflow <= 1):
        raise ValueError('nflow must be greater than 1. '
                         'You have nflow = {}'.format(
                            ', '.join([str(v) for v in np.atleast_1d(nflow)])))
    # beta = [2*gbar(N) - 2*gbar(1) - g(1)*(N**2 - 1)] / (N**2 - 1),
    # built up in place to reuse the intermediate array.
    beta = -_g(1, n, nflow, nterms)
    beta *= n**2 - 1
    beta += 2 * _gbar(n, n, nflow, nterms)
    beta -= 2 * _gbar(1, n, nflow, nterms)
    beta /= n**2 - 1
    return beta
def non_darcy_beta_constant(n, s, kap, nflow=1.0001, nterms=20, *args):
    """Non-darcian flow smear zone permeability/geometry parameter for
    smear zone with constant permeability.

    The beta parameter appears in equal strain radial consolidation
    equations with non-Darcian flow.

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to smear zone
        horizontal permeability (kh / ks).
    nflow : float, optional
        Non-darcian flow exponent. Default nflow=1.0001.
    nterms : int, optional
        Number of terms to use in series. Default nterms=20.
    args : anything
        `args` does not contribute to any calculations; it exists so this
        function has a call signature compatible with the other
        non_darcy_beta smear zone formulations.

    Returns
    -------
    beta : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    .. math::
        \\beta = \\frac{1}{N^2-1}
                 \\left({
                 2\\overline{g}\\left({N}\\right)
                 -\\kappa^{1/n}\\left({
                 2\\overline{g}\\left({1}\\right)
                 + g\\left({1}\\right) \\left({N^2-1}\\right)
                 }\\right)
                 +\\left({\\kappa^{1/n}-1}\\right)\\left({
                 2\\overline{g}\\left({s}\\right)
                 + g\\left({s}\\right) \\left({N^2-s^2}\\right)
                 }\\right)
                 }\\right)

    :math:`g\\left({y}\\right)` and :math:`\\overline{g}\\left({y}\\right)`
    are described in the `_g` and `_gbar` functions respectively.

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: s = \\frac{r_s}{r_w}

    .. math:: \\kappa = \\frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius,
    :math:`r_s` is the smear zone radius, :math:`k_h` is the undisturbed
    horizontal permeability, :math:`k_s` is the smear zone horizontal
    permeability.

    Examples
    --------
    >>> non_darcy_beta_constant(20,1,1, 1.000001, nterms=20)
    2.2538...
    >>> non_darcy_beta_constant(20,5,5, 1.000001, nterms=20)
    8.4710...
    >>> non_darcy_beta_constant(15, 5, 4, 1.3, nterms=20)
    6.1150...
    >>> non_darcy_beta_constant(np.array([20, 15]), 5,
    ... np.array([5,4]), np.array([1.000001, 1.3]), nterms=20)
    array([8.471..., 6.1150...])

    See Also
    --------
    _g : used in this function.
    _gbar : used in this function.

    References
    ----------
    .. [1] Hansbo, S. 1981. "Consolidation of Fine-Grained Soils by
           Prefabricated Drains". In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    .. [2] Walker, R., B. Indraratna, and C. Rujikiatkamjorn.
           "Vertical Drain Consolidation with Non-Darcian Flow and
           Void-Ratio-Dependent Compressibility and Permeability."
           Geotechnique 62, no. 11 (November 1, 2012): 985-97.
           doi:10.1680/geot.10.P.084.

    """
    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    nflow = np.asarray(nflow)
    if np.any(n <= 1):
        raise ValueError('n must be greater than 1. '
                         'You have n = {}'.format(
                            ', '.join([str(v) for v in np.atleast_1d(n)])))
    if np.any(s < 1):
        # Message previously (and wrongly) reported "You have n = ...".
        raise ValueError('s must be greater or equal to 1. '
                         'You have s = {}'.format(
                            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap < 1):
        raise ValueError('kap must be greater or equal to 1. '
                         'You have kap = {}'.format(
                            ', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(nflow <= 1):
        raise ValueError('nflow must be greater than 1. '
                         'You have nflow = {}'.format(
                            ', '.join([str(v) for v in np.atleast_1d(nflow)])))
    # Build up the bracketed term of the docstring formula, then divide.
    beta = 2 * _gbar(n, n, nflow, nterms)
    beta -= kap**(1 / nflow) * (
        2 * _gbar(1, n, nflow, nterms)
        + _g(1, n, nflow, nterms) * (n**2 - 1))
    beta += (kap**(1 / nflow) - 1) * (
        2 * _gbar(s, n, nflow, nterms)
        + _g(s, n, nflow, nterms) * (n**2 - s**2))
    beta /= n**2 - 1
    return beta
def non_darcy_beta_piecewise_constant(s, kap, n=None, kap_m=None,
                                      nflow=1.0001, nterms=20, *args):
    """Non-darcian flow smear zone permeability/geometry parameter for
    smear zone with piecewise constant permeability.

    The beta parameter appears in equal strain radial consolidation
    equations with non-Darcian flow.

    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of segment outer radii to drain radius (r_i/r_0). The first value
        of s should be greater than 1, i.e. the first value should be s_1;
        s_0=1 at the drain soil interface is implied.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability in each
        segment kh/khi.
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to `s` and
        `kap`. This allows the specification of a smear zone separate to the
        specification of the drain influence radius.
        Default n=kap_m=None, i.e. soil permeability is completely described
        by `s` and `kap`. If n is given but kap_m is None then the last
        kappa value in kap will be used.
    nflow : float, optional
        Non-darcian flow exponent. Default nflow=1.0001.
    nterms : int, optional
        Number of terms to use in series. Default nterms=20.

    Returns
    -------
    beta : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    The non-darcian smear zone parameter :math:`\\beta` is given by:

    .. math:: \\beta = \\frac{1}{\\left({n^2-1}\\right)}
                \\sum\\limits_{i=1}^{m} \\kappa^{1/n}_i
                \\left[{
                2\\overline{g}\\left({s_i}\\right)
                -2\\overline{g}\\left({s_{i-1}}\\right)
                }\\right]
                +\\psi_i \\left({s_i^2-s_{i-1}^2}\\right)

    where,

    .. math:: \\psi_{i} = \\sum\\limits_{j=1}^{i-1}\\kappa^{1/n}_j
                \\left[{
                g\\left({s_j}\\right)
                -g\\left({s_{j-1}}\\right)
                }\\right]

    and:

    .. math:: n = \\frac{r_m}{r_0}

    .. math:: s_i = \\frac{r_i}{r_0}

    .. math:: \\kappa_i = \\frac{k_h}{k_{hi}}

    :math:`r_0` is the drain radius, :math:`r_m` is the drain influence radius,
    :math:`r_i` is the outer radius of the ith segment,
    :math:`k_h` is the undisturbed horizontal permeability,
    :math:`k_{hi}` is the horizontal permeability in the ith segment.

    Examples
    --------
    >>> mu_piecewise_constant([1.5, 3, 4],[2, 3, 1], n=5)
    2.2533...
    >>> non_darcy_beta_piecewise_constant(s=np.array([1.5, 3, 4]),
    ... kap=np.array([2, 3, 1]), n=5, nflow=1.000001)
    2.2533...

    References
    ----------
    None because it is new.

    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    # Optionally extend the smear zone description out to the influence
    # radius n, using kap_m (or the last kap value) for the outer segment.
    if not n is None:
        s_temp = np.empty(len(s) + 1, dtype=float)
        s_temp[:-1] = s
        s_temp[-1] = n
        kap_temp = np.empty(len(kap) + 1, dtype=float)
        kap_temp[:-1] = kap
        if kap_m is None:
            kap_temp[-1] = kap[-1]
        else:
            kap_temp[-1] = kap_m
        s = s_temp
        kap = kap_temp
    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                            len(s), len(kap)))
    # s_0=1 is implied, so every supplied s must be strictly greater than 1
    # (the check below rejects s<=1; the message now matches the check).
    if np.any(s <= 1.0):
        raise ValueError('must have all s>1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
            '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(np.diff(s) <= 0):
        raise ValueError('s must increase left to right. You have s = '
            '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(nflow <= 1):
        raise ValueError('nflow must be greater than 1. '
                         'You have nflow = {}'.format(
                            ', '.join([str(v) for v in np.atleast_1d(nflow)])))
    n = s[-1]
    # s_ holds the segment inner radii: [1, s_1, ..., s_{m-1}].
    s_ = np.ones_like(s, dtype=float)
    s_[1:] = s[:-1]
    sumi = 0
    for i in range(len(s)):
        # psi accumulates the contribution of all segments inside segment i.
        psi = 0
        for j in range(i):
            psi += kap[j]**(1 / nflow) * (
                _g(s[j], n, nflow, nterms)
                - _g(s_[j], n, nflow, nterms)
                )
        psi /= kap[i]**(1 / nflow)
        sumi += kap[i]**(1 / nflow) * (
            2 * _gbar(s[i], n, nflow, nterms)
            - 2 * _gbar(s_[i], n, nflow, nterms)
            + (psi - _g(s_[i], n, nflow, nterms)) * (s[i]**2 - s_[i]**2)
            )
    beta = sumi / (n**2 - 1)
    return beta
def non_darcy_u_piecewise_constant(s, kap, si, uavg=1, uw=0, muw=0,
                         n=None, kap_m=None,
                         nflow=1.0001, nterms=20):
    """Pore pressure at radius for piecewise constant permeability
    distribution under non-Darcian flow.

    .. warning::
        `muw` must always be zero, i.e. no well resistance. (It exists only
        so this function has the same inputs as `u_piecewise_constant`.)

    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of segment outer radii to drain radius (r_i/r_0). The first value
        of s should be greater than 1, i.e. the first value should be s_1;
        s_0=1 at the drain soil interface is implied.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability in each
        segment kh/khi.
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional
        Average pore pressure in soil. Default uavg=1.
    uw : float, optional
        Pore pressure in drain. Default uw=0.
    muw : float, optional
        Well resistance mu parameter. Default muw=0 (see warning above).
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to `s` and
        `kap`. This allows the specification of a smear zone separate to the
        specification of the drain influence radius.
        Default n=kap_m=None, i.e. soil permeability is completely described
        by `s` and `kap`. If n is given but kap_m is None then the last
        kappa value in kap will be used.
    nflow : float, optional
        Non-darcian flow exponent. Default nflow=1.0001.
    nterms : int, optional
        Number of terms to use in series. Default nterms=20.

    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si.

    Notes
    -----
    The pore pressure in the ith segment is given by:

    .. math:: u_i(y) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\kappa^{1/n}_i
                \\left({
                g\\left({y}\\right)
                -g\\left({s_{i-1}}\\right)
                }\\right)
                +\\psi_i
                }\\right]+u_w

    where,

    .. math:: \\psi_{i} = \\sum\\limits_{j=1}^{i-1}\\kappa^{1/n}_j
                \\left[{
                g\\left({s_j}\\right)
                -g\\left({s_{j-1}}\\right)
                }\\right]

    and :math:`g\\left({y}\\right)` is described in the `_g` function.

    .. math:: y = \\frac{r}{r_0}

    .. math:: n = \\frac{r_m}{r_0}

    .. math:: s_i = \\frac{r_i}{r_0}

    .. math:: \\kappa_i = \\frac{k_h}{k_{hi}}

    :math:`r_0` is the drain radius, :math:`r_m` is the drain influence radius,
    :math:`r_i` is the outer radius of the ith segment,
    :math:`k_h` is the undisturbed horizontal permeability,
    :math:`k_{hi}` is the horizontal permeability in the ith segment.

    Examples
    --------
    >>> u_piecewise_constant([1.5, 3,], [2, 3], 1.6, n=5, kap_m=1)
    array([0.4153...])
    >>> non_darcy_u_piecewise_constant([1.5, 3,], [2, 3], 1.6, n=5, kap_m=1,
    ... nflow=1.0000001)
    array([0.4153...])
    >>> non_darcy_u_piecewise_constant([1.5, 3,], [2, 3], 1.6, n=5, kap_m=1,
    ... nflow=1.3)
    array([0.3865...])

    References
    ----------
    None because it is new.

    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    # Optionally extend the smear zone description out to the influence
    # radius n, using kap_m (or the last kap value) for the outer segment.
    if not n is None:
        s_temp = np.empty(len(s) + 1, dtype=float)
        s_temp[:-1] = s
        s_temp[-1] = n
        kap_temp = np.empty(len(kap) + 1, dtype=float)
        kap_temp[:-1] = kap
        if kap_m is None:
            kap_temp[-1] = kap[-1]
        else:
            kap_temp[-1] = kap_m
        s = s_temp
        kap = kap_temp
    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                            len(s), len(kap)))
    # s_0=1 is implied, so every supplied s must be strictly greater than 1
    # (the check below rejects s<=1; the message now matches the check).
    if np.any(s <= 1.0):
        raise ValueError('must have all s>1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
            '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(np.diff(s) <= 0):
        raise ValueError('s must increase left to right. You have s = '
            '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(nflow <= 1):
        raise ValueError('nflow must be greater than 1. '
                         'You have nflow = {}'.format(
                            ', '.join([str(v) for v in np.atleast_1d(nflow)])))
    n = s[-1]
    si = np.atleast_1d(si)
    # Message previously had the inequalities inverted ("1 >= si >= s[-1])").
    if np.any((si < 1) | (si > n)):
        raise ValueError('si must satisfy 1 <= si <= s[-1]')
    # s_ holds the segment inner radii: [1, s_1, ..., s_{m-1}].
    s_ = np.ones_like(s)
    s_[1:] = s[:-1]
    u = np.empty_like(si, dtype=float)
    # Index of the segment containing each si (si=1 maps to segment 0).
    segment = np.searchsorted(s, si)
    beta = non_darcy_beta_piecewise_constant(s, kap,
                                             nflow=nflow, nterms=nterms)
    term1 = (uavg - uw) / (beta + muw)
    for ii, i in enumerate(segment):
        # psi accumulates the contribution of all segments inside segment i.
        psi = 0
        for j in range(i):
            psi += kap[j]**(1 / nflow) * (
                _g(s[j], n, nflow, nterms)
                - _g(s_[j], n, nflow, nterms)
                )
        psi /= kap[i]**(1 / nflow)
        u[ii] = kap[i]**(1 / nflow) * (
            _g(si[ii], n, nflow, nterms)
            - _g(s_[i], n, nflow, nterms)
            + psi
            ) + muw
    u *= term1
    u += uw
    return u
def non_darcy_drain_eta(re, iL, gamw, beta_function, *args, **kwargs):
    """For non-Darcy flow calculate the vertical drain eta parameter.

    eta = 2 / (re**2 * beta**nflow * (rw * gamw)**(nflow-1) * nflow * iL**(nflow-1))

    `nflow` is obtained from **kwargs.  rw is back calculated from the n
    parameter (n=re/rw) which is usually the first of the *args parameters
    or one of the **kwargs.

    Note that eta is used in radial consolidation equations:

        [strain rate] = (u - uw)**n * k / gamw * eta

    Compare with the Darcian case (eta terms are calculated differently
    for Darcy and non-Darcy cases):

        [strain rate] = (u - uw) * k / gamw * eta

    Note that `non_darcy_drain_eta` only uses the exponential portion of the
    Non-Darcian flow relationship.  If hydraulic gradients are greater than
    iL then the flow rates will be overestimated.

    Parameters
    ----------
    re : float
        Drain influence radius.
    iL : float
        Limiting hydraulic gradient beyond which flow follows Darcy's law.
    gamw : float
        Unit weight of water. Usually gamw=10 kN/m**3 or gamw=9.807 kN/m**3.
    beta_function : obj or string
        The non_darcy_beta function to use, e.g. non_darcy_beta_ideal,
        non_darcy_beta_constant, non_darcy_beta_piecewise_constant.
        This can either be the function object itself
        or the name of the function e.g. 'non_darcy_beta_ideal'.
    *args, **kwargs : various
        The arguments to pass to the beta_function.  Must include `nflow`
        as a keyword, and `n` either as a keyword or as the first
        positional argument.

    Returns
    -------
    eta : float
        Value of eta parameter for non-Darcian flow.

    Examples
    --------
    >>> non_darcy_drain_eta(re=1.5, iL=10, gamw=10,
    ... beta_function='non_darcy_beta_ideal', n=15, nflow=1.3, nterms=20)
    0.09807...
    >>> non_darcy_drain_eta(1.5, 10, 10,
    ... 'non_darcy_beta_ideal', 15, nflow=1.3, nterms=20)
    0.09807...
    >>> non_darcy_drain_eta(re=1.5, iL=10, gamw=10,
    ... beta_function='non_darcy_beta_ideal', n=np.array([20.0, 15.0]),
    ... nflow=np.array([1.000001, 1.3]), nterms=20)
    array([0.3943..., 0.0980...])

    """
    # beta_function may be the function object itself or its name in this
    # module's namespace.
    try:
        beta_fn = globals()[beta_function]
    except KeyError:
        beta_fn = beta_function
    # Extract n=re/rw from kwargs, falling back to the first positional
    # argument destined for the beta function.  Only a missing 'n' key is
    # caught (the previous bare `except:` silently swallowed everything);
    # if args is also empty the IndexError propagates with a clear cause.
    try:
        n = kwargs['n']
    except KeyError:
        n = args[0]
    rw = re / n
    nflow = kwargs['nflow']
    beta = beta_fn(*args, **kwargs)
    eta = 2 / (re**2 * beta**nflow * (rw * gamw)**(nflow - 1)
                * nflow * iL**(nflow - 1))
    return eta
########################################################################
#scratch()
def scratch():
    """Scratch pad for experimenting with LaTeX markup in docstrings.

    Deliberately a no-op; kept as a placeholder for manual testing.
    """
    return None
if __name__ == '__main__':
    # Ad-hoc driver: first run the module's doctests, then manually exercise
    # a few of the vertical-drain helper functions defined above.
#    watch()
    import nose
    nose.runmodule(argv=['nose', '--verbosity=3', '--with-doctest', '--doctest-options=+ELLIPSIS'])
    # Back-calculate the drain spacing that produces a given eta value.
    eta = 5
    pattern = 't'  # presumably 't' = triangular drain pattern -- TODO confirm
    mu_function = mu_overlapping_linear  # smear-zone mu model to use
    rw = 0.05  # drain radius
    s = 5#[5,6]
    kap = 2#[2,1]
    muw = 1
    print(back_calc_drain_spacing_from_eta(eta, pattern, mu_function, rw, s, kap, muw))
#    u_constant()
#    k_overlapping_linear(()
    scratch()
# print('lin',u_linear(5,2,3,[1.5,4]))
# print('pwise', u_piecewise_linear([1,2,5],[3,1,1],[1.5,4]))
# x = np.array(
# [1., 1.06779661, 1.13559322, 1.20338983, 1.27118644,
# 1.33898305, 1.40677966, 1.47457627, 1.54237288, 1.61016949,
# 1.6779661 , 1.74576271, 1.81355932, 1.88135593, 1.94915254,
# 2.01694915, 2.08474576, 2.15254237, 2.22033898, 2.28813559,
# 2.3559322 , 2.42372881, 2.49152542, 2.55932203, 2.62711864,
# 2.69491525, 2.76271186, 2.83050847, 2.89830508, 2.96610169,
# 3.03389831, 3.10169492, 3.16949153, 3.23728814, 3.30508475,
# 3.37288136, 3.44067797, 3.50847458, 3.57627119, 3.6440678 ,
# 3.71186441, 3.77966102, 3.84745763, 3.91525424, 3.98305085,
# 4.05084746, 4.11864407, 4.18644068, 4.25423729, 4.3220339 ,
# 4.38983051, 4.45762712, 4.52542373, 4.59322034, 4.66101695,
# 4.72881356, 4.79661017, 4.86440678, 4.93220339, 5., 30 ])
#
# y = 1.0/np.array(
# [0.5 , 0.50847458, 0.51694915, 0.52542373, 0.53389831,
# 0.54237288, 0.55084746, 0.55932203, 0.56779661, 0.57627119,
# 0.58474576, 0.59322034, 0.60169492, 0.61016949, 0.61864407,
# 0.62711864, 0.63559322, 0.6440678 , 0.65254237, 0.66101695,
# 0.66949153, 0.6779661 , 0.68644068, 0.69491525, 0.70338983,
# 0.71186441, 0.72033898, 0.72881356, 0.73728814, 0.74576271,
# 0.75423729, 0.76271186, 0.77118644, 0.77966102, 0.78813559,
# 0.79661017, 0.80508475, 0.81355932, 0.8220339 , 0.83050847,
# 0.83898305, 0.84745763, 0.8559322 , 0.86440678, 0.87288136,
# 0.88135593, 0.88983051, 0.89830508, 0.90677966, 0.91525424,
# 0.92372881, 0.93220339, 0.94067797, 0.94915254, 0.95762712,
# 0.96610169, 0.97457627, 0.98305085, 0.99152542, 1., 1. ])
#
# mu_piecewise_linear(x,y)
# mu_overlapping_linear(np.array([5,10]),
# np.array([7, 12]),
# np.array([1.6, 1.5,]))
# mu_piecewise_linear([1, 5],
# [1, 1])
#
# s=80
# n=18
# kap=8
# x = np.linspace(1,n,50)
# y = k_overlapping_linear(n,s, kap, x)
# plt.plot(x,y)
# plt.gca().grid()
# plt.show()
#
# xp = np.array(
# [1., 1.06779661, 1.13559322, 1.20338983, 1.27118644,
# 1.33898305, 1.40677966, 1.47457627, 1.54237288, 1.61016949,
# 1.6779661 , 1.74576271, 1.81355932, 1.88135593, 1.94915254,
# 2.01694915, 2.08474576, 2.15254237, 2.22033898, 2.28813559,
# 2.3559322 , 2.42372881, 2.49152542, 2.55932203, 2.62711864,
# 2.69491525, 2.76271186, 2.83050847, 2.89830508, 2.96610169,
# 3.03389831, 3.10169492, 3.16949153, 3.23728814, 3.30508475,
# 3.37288136, 3.44067797, 3.50847458, 3.57627119, 3.6440678 ,
# 3.71186441, 3.77966102, 3.84745763, 3.91525424, 3.98305085,
# 4.05084746, 4.11864407, 4.18644068, 4.25423729, 4.3220339 ,
# 4.38983051, 4.45762712, 4.52542373, 4.59322034, 4.66101695,
# 4.72881356, 4.79661017, 4.86440678, 4.93220339, 5., 30 ])
#
# yp = 1.0/np.array(
# [ 0.5 , 0.51680552, 0.53332376, 0.54955473, 0.56549842,
# 0.58115484, 0.59652399, 0.61160586, 0.62640046, 0.64090779,
# 0.65512784, 0.66906061, 0.68270612, 0.69606435, 0.70913531,
# 0.72191899, 0.7344154 , 0.74662453, 0.75854639, 0.77018098,
# 0.7815283 , 0.79258834, 0.8033611 , 0.8138466 , 0.82404481,
# 0.83395576, 0.84357943, 0.85291583, 0.86196495, 0.8707268 ,
# 0.87920138, 0.88738868, 0.89528871, 0.90290147, 0.91022695,
# 0.91726515, 0.92401609, 0.93047975, 0.93665613, 0.94254525,
# 0.94814708, 0.95346165, 0.95848894, 0.96322896, 0.9676817 ,
# 0.97184717, 0.97572537, 0.97931629, 0.98261994, 0.98563631,
# 0.98836541, 0.99080724, 0.99296179, 0.99482907, 0.99640908,
# 0.99770181, 0.99870727, 0.99942545, 0.99985636, 1., 1. ])
#
#
#
# n=30
# s=5
# kap=2
# muw=0
# uw=-0.2
# x = np.linspace(1, s, 60)
# y = k_linear(n, s, kap, x)
#
# x = np.linspace(1, n, 400)
# y = u_ideal(n,x, uw=uw,muw=muw)
# y2 = u_parabolic(n,s,kap,x, uw=uw,muw=muw)
# y3 = u_linear(n,s,kap,x, uw=uw,muw=muw)
# y4 = u_constant(n,s,kap,x, uw=uw,muw=muw)
# y5 = u_piecewise_constant([s,n], [kap,1],x, uw=uw,muw=muw)
## y6 = u_piecewise_linear([1,s,s,n], [kap,kap,1,1], x, uw=uw,muw=muw)
##
## y7 = u_piecewise_linear([1,s,n], [kap,1,1], x, uw=uw,muw=muw)
## y8 = u_piecewise_linear(xp, yp, x, uw=uw,muw=muw)
## print(repr(x))
## print(repr(y))
# plt.plot(x,y, '-',label='ideal')
# plt.plot(x, y2, '--',label='para')
# plt.plot(x, y3, dashes=[5,2,2,2],label='lin')
# plt.plot(x, y4, dashes=[8,2],label='const')
# plt.plot(x, y5,'+',ms=2, label='pwisec')
## plt.plot(x, y6,'o',ms=3, label='pwisel')
## plt.plot(x, y7,'^',ms=3, label='pwisel_lin')
## plt.plot(x, y8,'^',ms=3, label='pwisel_para')
# leg=plt.gca().legend(loc=4)
# plt.gca().grid()
# plt.show()
mu_piecewise_constant([1.5,5],
[1.6,1])
scratch()
print(mu_parabolic(30,5,2))
print(k_parabolic(30, 5, 2, [1, 1.13559322]))
k_parabolic(20,1,2,[4,6,7])
# mu_linear()
# nose.runmodule(argv=['nose', '--verbosity=3'])
# print(mu_ideal(0.5))
# print(mu_linear(np.array([50,100]),
# np.array([10,20]),
# np.array([5,3])))
| gpl-3.0 |
yaroslavvb/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 56 | 1959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
def optimizer_exp_decay():
  """Build an Adagrad optimizer whose learning rate decays exponentially.

  Returns:
    A `tf.train.AdagradOptimizer` whose learning rate starts at 0.1 and is
    decayed by a factor of 0.001 every 100 global steps.
  """
  step = tf.contrib.framework.get_or_create_global_step()
  decayed_lr = tf.train.exponential_decay(learning_rate=0.1,
                                          global_step=step,
                                          decay_steps=100,
                                          decay_rate=0.001)
  return tf.train.AdagradOptimizer(learning_rate=decayed_lr)
def main(unused_argv):
  """Train and evaluate a DNN classifier on Iris with an exp-decay optimizer."""
  iris_data = datasets.load_iris()
  # Hold out 20% of the samples for evaluation.
  train_x, test_x, train_y, test_y = train_test_split(
      iris_data.data, iris_data.target, test_size=0.2, random_state=42)
  feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
      train_x)
  classifier = tf.contrib.learn.DNNClassifier(
      feature_columns=feature_columns,
      hidden_units=[10, 20, 10],
      n_classes=3,
      optimizer=optimizer_exp_decay)
  classifier.fit(train_x, train_y, steps=800)
  predictions = list(classifier.predict(test_x, as_iterable=True))
  accuracy = metrics.accuracy_score(test_y, predictions)
  print('Accuracy: {0:f}'.format(accuracy))
if __name__ == '__main__':
  # tf.app.run() parses command-line flags and then invokes main().
  tf.app.run()
| apache-2.0 |
moosekaka/sweepython | tubule_het/psd_rand_carbon.py | 1 | 4161 | # -*- coding: utf-8 -*-
"""
Calculates and plots POWER SPEC DENSITY (psd) of Δψ for real and random dists.
@author: sweel
"""
import os
import os.path as op
import cPickle as pickle
from collections import defaultdict
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import wrappers as wr
from tubule_het.autoCor.AutoPopFunc import psd, conv_to_pd, tidy_psd
# pylint: disable=C0103
# pylint: disable=R0204
sns.set_context("talk")
sns.set(style="whitegrid")
sns.set(rc={"legend.markerscale": 3})
# xkcd color names used to distinguish the four distribution types in plots.
colors = ["medium green",
          "greyish blue",
          "yellowy brown",
          "reddish grey"]
# =============================================================================
# Data initialization
# =============================================================================
plt.close('all')
datadir = op.join(os.getcwd(), 'data')
rawdir = op.join(os.getcwd(), 'output')
# Recursively collect the normalized skeleton VTK files, keyed by media type
# and then by cell name (wr.ddwalk returns a nested dict-like structure).
vtkF = wr.ddwalk(op.join(rawdir, 'normalizedVTK'),
                 '*skeleton.vtk', start=5, stop=-13)
PSDY = defaultdict(dict)  # DY_scaled autocors
PSDP = defaultdict(dict)  # Shuffled dist autocors
PSDN = defaultdict(dict)  # Normal dist autocors
PSDU = defaultdict(dict)  # uniform dist autocors
# Human-readable label -> container holding each YPE distribution's PSDs.
psd_type = {'actual YPE': PSDY['YPE'],
            'normal': PSDN['YPE'],
            'shuffled': PSDP['YPE'],
            'uniform': PSDU['YPE']}
bins = np.linspace(0, .5, 22)  # bins for the x-axis (freq spectrum)
# =============================================================================
# Load fitted and real data, calculate PSD
# =============================================================================
for mtype in sorted(vtkF.keys())[:]:
    for cell in vtkF[mtype].keys():
        PSDU[mtype][cell] = []
        PSDN[mtype][cell] = []
        PSDP[mtype][cell] = []
        PSDY[mtype][cell] = []
        # Each pickle holds the real (scaled) intensities plus the fitted
        # shuffled / normal / uniform surrogate distributions for one cell.
        with open(op.join(rawdir,
                          'fitted_data_scaled',
                          '%s.pkl' % cell), 'rb') as inpt:
            (lNorm, lNormP, randNDY, randUDY, llineId) = pickle.load(inpt)
        PSDY[mtype][cell].append(psd(np.squeeze(lNorm), 40))
        PSDU[mtype][cell].append(psd(np.squeeze(randUDY), 40))
        PSDN[mtype][cell].append(psd(np.squeeze(randNDY), 40))
        PSDP[mtype][cell].append(psd(np.squeeze(lNormP), 40))
        print "done psd for %s" % cell
# =============================================================================
# Power spectrum density actual (YPE) vs random
# =============================================================================
psd_tidydata = pd.DataFrame()
for dist_type in sorted(psd_type.keys()):
    realranddata = psd_type[dist_type]
    psdx, psdy = conv_to_pd(realranddata)
    psd_tidydata = psd_tidydata.append(
        tidy_psd(psdx, psdy, bins, dist_type),
        ignore_index=True)
# =============================================================================
# Power spectrum density actual by media
# =============================================================================
psd_tidydata2 = pd.DataFrame()
for carbon in sorted(PSDY.keys()):
    carbondata = PSDY[carbon]
    psdx, psdy = conv_to_pd(carbondata)
    psd_tidydata2 = psd_tidydata2.append(
        tidy_psd(psdx, psdy, bins, carbon),
        ignore_index=True)
# ============================================================================
# Plot
# ============================================================================
sns.set(style="whitegrid")
# vs random
with sns.plotting_context('talk', font_scale=1.4):
    _, ax1 = plt.subplots(1, 1)
    sns.pointplot(x='u',
                  y='psd',
                  hue='type',
                  palette=sns.xkcd_palette(colors),
                  scale=.95,
                  data=psd_tidydata,
                  ax=ax1)
    ax1.get_legend().set_visible(False)
# vs carbon type
with sns.plotting_context('talk', font_scale=1.4):
    _, ax2 = plt.subplots(1, 1)
    sns.pointplot(x='u',
                  y='psd',
                  hue='type',
                  scale=.95,
                  data=psd_tidydata2,
                  ax=ax2)
    ax2.get_legend().set_visible(False)
| mit |
twschum/mix-mind | mixmind_cli.py | 1 | 15109 | #!/usr/bin/env python
"""
Turn recipes json into a readable menu
"""
import argparse
import pickle as pickle
from collections import Counter, defaultdict
import json
import jsonschema
import pandas as pd
import mixmind.recipe as drink_recipe
from mixmind.barstock import Barstock
import mixmind.formatted_menu as formatted_menu
import mixmind.util as util
def get_parser():
    """Build and return the argparse parser for the MixMind CLI.

    Top-level options cover input files, display tweaks, and recipe
    filtering; subcommands select the output format ('txt', 'pdf') or a
    utility action ('test', 'validate').
    """
    p = argparse.ArgumentParser(description="""
MixMind Drink Menu Generator by twschum
                      &&&
                    ,@&&&
               &&&&&&&&.
              (&&&&&&&&&*
              (&&&&&&&&&&
                &&&&&&&&&,
                   &&&&&&
                       &&
                      &&&
  &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
   &&%                 &&@                         &&&
    #&&,               &&              (&&,
     &&&               &&&            .&&&
      &&&/*.        .*#&&&&&&&&&&&&&&&&&&&&&&&
       &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
        &&&&.   &&&&&&&&&&&&&&&&&&&&&&&&&&&&
         *&&&&   *&&&&&&&&&&&&&&&&&&%&&%&&.
          &&&&&   &&&&&&&&&&&&&&%&&%%#&(
           %#&&&   &&@&&%%&&%&&&&&&%@
            &&&&@   &%&%#%&&&&%@%@
             &&&&.   &&&&&&&&&&
              .&&&&&&&&&&&&&
                #&&&&&&&&&,
                  &&&&&&&
                   &&&&#
                   &&&&#
                   &&&&#
                   &&&&#
                   &&&&#
                   &&&&#
                   &&&&#
                   &&&&#
                   &&&&#
                   &&&&#
          .&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
      &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 You'll need:
 A json file of recipes conforming to the schema
 {{
 "Martini": {{
 "info": "The King of Cocktails",
 "ingredients": {{
 "dry gin": 2.5,
 "dry vermouth": 0.5
 }},
 "optional": {{
 "orange bitters": "dash"
 }},
 "variants": ["Reverse Martini: 5 parts vermouth to 1 part gin",
 "Perfect Martini: equal parts dry and sweet vermouth"],
 "misc": "This line shows up below the ingredients",
 "unit": "oz", # or mL, whatever the primary unit is for ingredients
 "prep": "stir", # shake, build, throw
 "ice": "none", # cubed, crushed
 "glass": "martini", # rocks, collins, flute, cocktail
 "garnish": "Lemon twist or olives"
 }}
 }}
 A csv of ingredients based on the following format:
 Category Type Bottle In Stock ABV Size (mL) Price Paid
 Spirit Rye Whiskey Bulleit Rye 1 45 750 $28.96
 Spirit Dry Gin New Amsterdam 0 44 1750 $25.49
 Liqueur Orange Liqueur Triple Sec 1 15 750 $5.99
 Vermouth Dry Vermouth Noilly Prat Dry 1 16 375 $6.99
 Bitters Aromatic Bitters Angostura 1 44.7 118 $7.95
 Syrup Simple Syrup Homemade 1 0 4000 $2.79
 Juice Lemon Juice Fresh 1 0 45 $0.80
 Mixer Club Soda Club Soda 0 0 178 $1.00
 Example usage:
 {} -b 'Barstock.csv' -r 'my_recipes.json' -p -e -i lime rum -x 'lemon juice' pdf my_menu -n 2 -l
""".format(__file__), formatter_class=argparse.RawTextHelpFormatter)
    subparsers = p.add_subparsers(help='commands', dest='command')
    # core parameters
    p.add_argument('-v', '--verbose', action='store_true')
    p.add_argument('-b', '--barstock', help="Barstock csv filename")
    p.add_argument('-r', '--recipes', nargs='+', default=['recipes_schubar.json'], help="Recipes json filename(s)")
    p.add_argument('--save_cache', action='store_true', help="Pickle the generated recipes to cache them for later use (e.g. a quicker build of the pdf)")
    p.add_argument('--load_cache', action='store_true', help="Load the generated recipes from cache for use")
    # display options
    p.add_argument('-$', '--prices', action='store_true', help="Display prices for drinks based on stock")
    p.add_argument('-p', '--prep-line', action='store_true', help="Display a line showing glass, ice, and prep")
    p.add_argument('-s', '--stats', action='store_true', help="Print out a detailed statistics block for the selected recipes")
    p.add_argument('-e', '--examples', action='store_true', help="Show specific examples of a recipe based on the ingredient stock")
    p.add_argument('-c', '--convert', default='oz', choices=['oz','mL','cL'], help="Convert recipes to a different primary unit")
    p.add_argument('-g', '--all-ingredients', action='store_true', help="Show every ingredient instead of just the main liquors with each example")
    p.add_argument('-m', '--markup', default=1.2, type=float, help="Drink markup: price = ceil((base_cost+1)*markup)")
    p.add_argument('--info', action='store_true', help="Show the info line for recipes")
    p.add_argument('--origin', action='store_true', help="Check origin and mark drinks as Schubar originals")
    p.add_argument('--variants', action='store_true', help="Show variants for drinks")
    # filtering options
    p.add_argument('-a', dest='all_', action='store_true', help="Include all ingredients from barstock whether or not that are marked in stock")
    p.add_argument('-i', '--include', nargs='+', help="Filter by ingredient(s) that must be contained in the recipe")
    p.add_argument('-x', '--exclude', nargs='+', help="Filter by ingredient(s) that must NOT be contained in the recipe")
    p.add_argument('--or', dest='use_or', action='store_true', help="use logical OR for included and excluded ingredient lists instead of default AND")
    p.add_argument('--name', help="Include drinks matching on name")
    p.add_argument('--tag', help="Include drinks matching a tag")
    p.add_argument('--style', help="Include drinks matching the style such as After Dinner or Longdrink")
    p.add_argument('--list', help="Include drinks matching the specified list")
    p.add_argument('--glass', help="Include drinks matching the glassware")
    p.add_argument('--prep', help="Include drinks matching the prep (shake, stir, build)")
    p.add_argument('--ice', help="Include drinks matching the type of ice (crushed, cubed, neat)")
    # txt output
    txt_parser = subparsers.add_parser('txt', help='Simple plain text output')
    txt_parser.add_argument('--names', action='store_true', help="Show the names of drinks only")
    txt_parser.add_argument('--ingredients', action='store_true', help="Show name and ingredients but not full recipe")
    txt_parser.add_argument('-w', '--write', default=None, help="Save text menu out to a file")
    # pdf (latex) output and options
    pdf_parser = subparsers.add_parser('pdf', help='Options for generating a pdf via LaTeX integration')
    pdf_parser.add_argument('pdf_filename', help="Basename of the pdf and tex files generated")
    pdf_parser.add_argument('-n', '--ncols', default=2, type=int, help="Number of columns to use for the menu")
    pdf_parser.add_argument('-l', '--liquor_list', action='store_true', help="Show list of the available ingredients")
    pdf_parser.add_argument('-L', '--liquor_list_own_page', action='store_true', help="Show list of the available ingredients on a separate page")
    pdf_parser.add_argument('-D', '--debug', action='store_true', help="Add debugging output to the pdf")
    pdf_parser.add_argument('--align', action='store_true', help="Align drink names across columns")
    pdf_parser.add_argument('--title', default=None, help="Title to use")
    pdf_parser.add_argument('--tagline', default=None, help="Tagline to use below the title")
    # Do alternate things
    test_parser = subparsers.add_parser('test', help='whatever I need it to be')
    # Do some validation
    test_parser = subparsers.add_parser('validate', help='Run schema validation against recipe files')
    return p
def bundle_options(tuple_class, args):
    """Build a ``tuple_class`` instance by reading each of its declared
    fields off the parsed-argument object ``args``.

    ``tuple_class`` is expected to be a namedtuple type (it must expose
    ``_fields``); ``args`` is any object with matching attributes, such as
    an ``argparse.Namespace``.
    """
    values = [getattr(args, field) for field in tuple_class._fields]
    return tuple_class(*values)
def main():
    """CLI entry point: parse args, load/filter recipes, and dispatch to the
    requested command ('txt', 'pdf', 'test', 'validate')."""
    args = get_parser().parse_args()
    display_options = bundle_options(util.DisplayOptions, args)
    filter_options = bundle_options(util.FilterOptions, args)
    pd.set_option('display.expand_frame_repr', False)

    if args.command == 'test':
        print("This is a test")
        recipes = util.load_recipe_json(args.recipes)
        # Tally every non-structural recipe property across all recipes.
        fields = {}
        for recipe in recipes.values():
            for label, value in recipe.items():
                if label not in ['info', 'variants', 'garnish', 'IBA_description', 'optional', 'ingredients', 'misc']:
                    if not isinstance(value, list):
                        value = [value]
                    try:
                        fields.setdefault(label, Counter()).update(value)
                    except Exception as e:
                        import ipdb; ipdb.set_trace();
                        print(e)
        for field, values in fields.items():
            print("{}: {}".format(field, values.most_common()))
        return
        # NOTE(review): everything below this return is unreachable scratch
        # code, kept for manual experimentation (move the return to re-enable).
        recipes = [drink_recipe.DrinkRecipe(name, recipe) for name, recipe in recipes.items()]
        # output all the ingredients
        ingredients = Counter()
        for info in recipes.values():
            ingredients.update(iter(info.get('ingredients', {}).keys()))
        for i, n in ingredients.most_common():
            print('{:2d} {}'.format(n, str(i).encode('ascii', errors='replace')))
        return

    if args.command == 'validate':
        with open('recipe_schema.json') as fp:
            schema = json.load(fp)
        for recipe_file in args.recipes:
            with open(recipe_file) as fp:
                recipes = json.load(fp)
            jsonschema.validate(recipes, schema)
            # Bug fix: the filename was never passed to format(), so the
            # message printed a literal "{}".
            print("{} passes schema".format(recipe_file))
        return

    RECIPES_CACHE_FILE = 'cache_recipes.pkl'
    BARSTOCK_CACHE_FILE = 'cache_barstock.pkl'
    if args.load_cache:
        barstock = Barstock(pd.read_pickle(BARSTOCK_CACHE_FILE))
        # Bug fix: referenced undefined CACHE_FILE (NameError) and opened the
        # pickle in text mode; pickle requires a binary-mode file object.
        with open(RECIPES_CACHE_FILE, 'rb') as fp:
            recipes, filter_options = pickle.load(fp)
        # Bug fix: the message had three placeholders but only two arguments,
        # which raised IndexError at runtime.
        print("Loaded {} recipes from cache file with options:\n{}".format(
            len(recipes), filter_options))
    else:
        base_recipes = util.load_recipe_json(args.recipes)
        if args.barstock:
            barstock = Barstock.load(args.barstock, args.all_)
            recipes = [drink_recipe.DrinkRecipe(name, recipe).generate_examples(barstock)
                       for name, recipe in base_recipes.items()]
        else:
            recipes = [drink_recipe.DrinkRecipe(name, recipe) for name, recipe in base_recipes.items()]
        if args.convert:
            print("Converting recipes to unit: {}".format(args.convert))
            [r.convert(args.convert) for r in recipes]
        recipes, excluded = util.filter_recipes(recipes, filter_options)

    if args.save_cache:
        # NOTE(review): 'barstock' is only bound when --load_cache or -b was
        # given; --save_cache without either will raise NameError -- confirm
        # whether argparse usage guarantees one of them.
        barstock.df.to_pickle(BARSTOCK_CACHE_FILE)
        # Bug fix: referenced undefined RECIPE_CACHE_FILE (NameError) and
        # wrote the pickle in text mode; use the defined name and binary mode.
        with open(RECIPES_CACHE_FILE, 'wb') as fp:
            pickle.dump((recipes, filter_options), fp)
        # Bug fix: the count was passed to format() but the message had no
        # placeholder for it.
        print("Saved {} recipes and barstock to cache files".format(len(recipes)))

    if args.stats and recipes:
        stats = util.report_stats(recipes)
        for stat in stats:
            print(stat)

    if args.command == 'pdf':
        #recipes.sort(key=lambda x: x.name)
        if not args.barstock:
            if args.liquor_list or args.liquor_list_own_page or args.examples or args.prices:
                print("Must have a barstock file for these options")
                return
            barstock_df = None
        else:
            barstock_df = barstock.df
        pdf_options = bundle_options(util.PdfOptions, args)
        formatted_menu.generate_recipes_pdf(recipes, pdf_options, display_options, barstock_df)
        return

    if args.command == 'txt':
        groups = defaultdict(list)
        #for recipe in recipes:
            #groups[str(recipe.first_ingredient())].append(recipe)
        #print groups.keys()
        if args.names or args.ingredients:
            # Width of the longest drink name, for column alignment.
            if args.ingredients and len(recipes):
                name_w = max((len(recipe.name) for recipe in recipes))
            for recipe in recipes:
                try:
                    if args.ingredients:
                        print("{{:<{}}} - {{}}".format(name_w).format(recipe.name, ', '.join(recipe.get_ingredient_list())))
                    else:
                        print(recipe.name)
                except UnicodeEncodeError:
                    from pprint import pprint; import ipdb; ipdb.set_trace()
                    print(recipe)
            #print '\n'.join([str(len(str(recipe).split('\n')))+' '+recipe.name for recipe in recipes])
            print('------------\n{} recipes\n'.format(len(recipes)))
            return
            #if args.write:
                #with open(args.write, 'w') as fp:
                    #fp.write('\n\n'.join(menu))
        else:
            print('\n'.join([str(recipe) for recipe in recipes]))
            print()
| apache-2.0 |
deepesch/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])
plt.show()
| bsd-3-clause |
terkkila/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
    """Center and scale ``x`` **in place** to zero mean and unit std.

    Parameters
    ----------
    x : ndarray
        Array with an axis of observations (statistical units) measured on
        random variables.
    axis : int, optional
        Axis along which the mean and variance are calculated.
    """
    # rollaxis returns a view, so in-place ops below modify the caller's array
    rolled = np.rollaxis(x, axis)
    rolled -= rolled.mean(axis=0)
    rolled /= rolled.std(axis=0)
def test_gs():
    # Test gram schmidt orthonormalization
    # generate a random orthogonal matrix
    rng = np.random.RandomState(0)
    W, _, _ = np.linalg.svd(rng.randn(10, 10))
    w = rng.randn(10)
    # Decorrelating against all 10 basis vectors must leave (numerically) zero.
    _gs_decorrelation(w, W, 10)
    assert_less((w ** 2).sum(), 1.e-10)
    w = rng.randn(10)
    # Decorrelating against the first 5 vectors: the projection of the result
    # onto those 5 directions must vanish.
    u = _gs_decorrelation(w, W, 5)
    tmp = np.dot(u, W.T)
    assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 1000
    # Generate two sources: a square wave and a heavy-tailed (Student-t) signal.
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(2, 1000)
    center_and_norm(m)
    # function as fun arg: custom contrast function (value, derivative mean)
    def g_test(x):
        return x ** 3, (3 * x ** 2).mean(axis=-1)
    # Exercise every combination of algorithm, contrast fn, and whitening.
    algos = ['parallel', 'deflation']
    nls = ['logcosh', 'exp', 'cube', g_test]
    whitening = [True, False]
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
            # A bare callable that is not a valid fun spec must be rejected.
            assert_raises(ValueError, fastica, m.T, fun=np.tanh,
                          algorithm=algo)
        else:
            # Pre-whiten manually when fastica's own whitening is disabled.
            X = PCA(n_components=2, whiten=True).fit_transform(m.T)
            k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
            assert_raises(ValueError, fastica, X, fun=np.tanh,
                          algorithm=algo)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        # ICA recovers sources only up to sign; fix the sign before comparing.
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))
        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
    # Test FastICA class: the estimator API must agree with the function API.
    _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
    ica = FastICA(fun=nl, algorithm=algo, random_state=0)
    sources = ica.fit_transform(m.T)
    assert_equal(ica.components_.shape, (2, 2))
    assert_equal(sources.shape, (1000, 2))
    assert_array_almost_equal(sources_fun, sources)
    assert_array_almost_equal(sources, ica.transform(m.T))
    assert_equal(ica.mixing_.shape, (2, 2))
    # Invalid fun arguments must raise at fit time.
    for fn in [np.tanh, "exp(-.5(x^2))"]:
        ica = FastICA(fun=fn, algorithm=algo, random_state=0)
        assert_raises(ValueError, ica.fit, m.T)
    assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
    m = [[0, 1], [1, 0]]
    # test for issue #697: fitting with whiten=False should warn (not crash)
    # and still produce a mixing_ attribute.
    ica = FastICA(n_components=1, whiten=False, random_state=0)
    assert_warns(UserWarning, ica.fit, m)
    assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
    # Test the FastICA algorithm on very simple data, with more mixtures (6)
    # than sources (2).
    rng = np.random.RandomState(0)
    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing matrix
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)
    center_and_norm(m)
    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T
    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    # ICA recovers sources only up to sign; fix the sign before comparing.
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))
    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
    # Test FastICA.fit_transform: it must match fit(X) followed by
    # transform(X) for both whitened and non-whitened configurations.
    rng = np.random.RandomState(0)
    X = rng.random_sample((100, 10))
    for whiten, n_components in [[True, 5], [False, None]]:
        # n_components=None falls back to the number of input features.
        n_components_ = (n_components if n_components is not None else
                         X.shape[1])
        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        Xt = ica.fit_transform(X)
        assert_equal(ica.components_.shape, (n_components_, 10))
        assert_equal(Xt.shape, (100, n_components_))
        # Re-fit an identically seeded estimator and compare transform output.
        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        ica.fit(X)
        assert_equal(ica.components_.shape, (n_components_, 10))
        Xt2 = ica.transform(X)
        assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
    # Test FastICA.inverse_transform
    n_features = 10
    n_samples = 100
    n1, n2 = 5, 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    # Expected mixing_ shape per (whiten, n_components) combination; with
    # whiten=False the requested n_components is ignored (hence n2).
    expected = {(True, n1): (n_features, n1),
                (True, n2): (n_features, n2),
                (False, n1): (n_features, n2),
                (False, n2): (n_features, n2)}
    for whiten in [True, False]:
        for n_components in [n1, n2]:
            n_components_ = (n_components if n_components is not None else
                             X.shape[1])
            ica = FastICA(n_components=n_components, random_state=rng,
                          whiten=whiten)
            with warnings.catch_warnings(record=True):
                # catch "n_components ignored" warning
                Xt = ica.fit_transform(X)
            expected_shape = expected[(whiten, n_components_)]
            assert_equal(ica.mixing_.shape, expected_shape)
            X2 = ica.inverse_transform(Xt)
            assert_equal(X.shape, X2.shape)
            # reversibility test in non-reduction case
            if n_components == X.shape[1]:
                assert_array_almost_equal(X, X2)
| bsd-3-clause |
redreamality/daft | examples/exoplanets.py | 7 | 1616 | """
The Fergus model of exoplanet detection
=======================================
Besides being generally awesome, this example also demonstrates how you can
color the nodes and add arbitrary labels to the figure.
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
# Colors: green for planet-related nodes, orange for speckle-related nodes.
p_color = {"ec": "#46a546"}
s_color = {"ec": "#f89406"}
# Build the probabilistic graphical model canvas.
pgm = daft.PGM([3.6, 3.5], origin=[0.7, 0])
n = daft.Node("phi", r"$\phi$", 1, 3, plot_params=s_color)
n.va = "baseline"
pgm.add_node(n)
pgm.add_node(daft.Node("speckle_coeff", r"$z_i$", 2, 3, plot_params=s_color))
pgm.add_node(daft.Node("speckle_img", r"$x_i$", 2, 2, plot_params=s_color))
pgm.add_node(daft.Node("spec", r"$s$", 4, 3, plot_params=p_color))
pgm.add_node(daft.Node("shape", r"$g$", 4, 2, plot_params=p_color))
pgm.add_node(daft.Node("planet_pos", r"$\mu_i$", 3, 3, plot_params=p_color))
pgm.add_node(daft.Node("planet_img", r"$p_i$", 3, 2, plot_params=p_color))
pgm.add_node(daft.Node("pixels", r"$y_i ^j$", 2.5, 1, observed=True))
# Edges.
pgm.add_edge("phi", "speckle_coeff")
pgm.add_edge("speckle_coeff", "speckle_img")
pgm.add_edge("speckle_img", "pixels")
pgm.add_edge("spec", "planet_img")
pgm.add_edge("shape", "planet_img")
pgm.add_edge("planet_pos", "planet_img")
pgm.add_edge("planet_img", "pixels")
# And a plate.
pgm.add_plate(daft.Plate([1.5, 0.2, 2, 3.2], label=r"exposure $i$",
                         shift=-0.1))
pgm.add_plate(daft.Plate([2, 0.5, 1, 1], label=r"pixel $j$",
                         shift=-0.1))
# Render and save.
pgm.render()
pgm.figure.savefig("exoplanets.pdf")
pgm.figure.savefig("exoplanets.png", dpi=150)
| mit |
daniaki/Enrich2 | plugins/ratios_scorer.py | 1 | 4679 | # Copyright 2016-2017 Alan F Rubin
#
# This file is part of Enrich2.
#
# Enrich2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Enrich2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Enrich2. If not, see <http://www.gnu.org/licenses/>.
import logging
import numpy as np
import pandas as pd
from enrich2.plugins.scoring import BaseScorerPlugin
from enrich2.plugins.options import Options
from enrich2.base.constants import WILD_TYPE_VARIANT
from enrich2.base.utils import log_message
from enrich2.base.constants import IDENTIFIERS, VARIANTS
options = Options()
options.add_option(
name="Normalization Method",
varname="logr_method",
dtype=str,
default='Wild Type',
choices={
'Wild Type': 'wt',
'Complete Cases': 'complete',
'All Reads': 'full'
},
hidden=False
)
class RatiosScorer(BaseScorerPlugin):
name = 'Ratios'
version = '1.0'
author = 'Alan Rubin, Daniel Esposito'
def compute_scores(self):
for label in self.store_labels():
self.calc_ratios(label)
def calc_ratios(self, label):
"""
Calculate frequency ratios and standard errors between the
last timepoint and the input. Ratios can be calculated using
one of three methods:
- wt
- complete
- full
"""
if self.store_check("/main/{}/scores".format(label)):
return
log_message(
logging_callback=logging.info,
msg="Calculating ratios ({})".format(label),
extra={'oname': self.name}
)
c_last = 'c_{}'.format(self.store_timepoints()[-1])
df = self.store_select(
key="/main/{}/counts".format(label),
columns=['c_0', '{}'.format(c_last)]
)
if self.logr_method == "wt":
if VARIANTS in self.store_labels():
wt_label = VARIANTS
elif IDENTIFIERS in self.store_labels():
wt_label = IDENTIFIERS
else:
raise ValueError('Failed to use wild type log '
'ratio method, suitable data '
'table not present [{}]'.format(self.name))
shared_counts = self.store_select(
key="/main/{}/counts".format(wt_label),
columns=['c_0', '{}'.format(c_last)],
where="index='{}'".format(WILD_TYPE_VARIANT)
)
# wild type not found
if len(shared_counts) == 0:
raise ValueError('Failed to use wild type log '
'ratio method, wild type '
'sequence not present [{}]'.format(self.name))
shared_counts = shared_counts.values + 0.5
elif self.logr_method == "complete":
shared_counts = self.store_select(
key="/main/{}/counts".format(label),
columns=['c_0', '{}'.format(c_last)]
).sum(axis="index").values + 0.5
elif self.logr_method == "full":
shared_counts = self.store_select(
key="/main/{}/counts_unfiltered".format(label),
columns=['c_0', '{}'.format(c_last)]
).sum(axis="index", skipna=True).values + 0.5
else:
raise ValueError('Invalid log ratio method "{}" '
'[{}]'.format(self.logr_method, self.name))
ratios = np.log(df[['c_0', c_last]].values + 0.5) - \
np.log(shared_counts)
ratios = ratios[:, 1] - ratios[:, 0] # selected - input
ratios = pd.DataFrame(ratios, index=df.index, columns=['logratio'])
shared_variance = np.sum(1. / shared_counts)
summed = np.sum(1. / (df[['c_0', c_last]].values + 0.5), axis=1)
ratios['variance'] = summed + shared_variance
ratios['score'] = ratios['logratio']
ratios['SE'] = np.sqrt(ratios['variance'])
# re-order columns
ratios = ratios[['score', 'SE', 'logratio', 'variance']]
self.store_put(
key="/main/{}/scores".format(label),
value=ratios,
data_columns=ratios.columns
)
| gpl-3.0 |
lordkman/burnman | misc/benchmarks/debye.py | 4 | 1721 | from __future__ import absolute_import
from __future__ import print_function
import os.path
import sys
sys.path.insert(1, os.path.abspath('../..'))
import numpy as np
import matplotlib.pyplot as plt
import burnman
import scipy.integrate
import time
def old_thermal(T, debye_T, n):
if T == 0:
return 0
return 3. * n * burnman.constants.R * T * burnman.debye_fn(debye_T / T)
def old_heat(T, debye_T, n):
if T == 0:
return 0
deb = scipy.integrate.quad(
lambda x: pow(x, 4.) * np.exp(x) / pow((np.exp(x) - 1.), 2.), 0.0, debye_T / T)
return 9. * n * burnman.constants.gas_constant * deb[0] / pow(debye_T / T, 3.)
temperatures = np.linspace(100, 5000, 10000)
Debye_T = 1000.
old = np.empty_like(temperatures)
start = time.clock()
for i in range(len(temperatures)):
old[i] = old_heat(temperatures[i], Debye_T, 1.0)
time_old = time.clock() - start
new = np.empty_like(temperatures)
start = time.clock()
for i in range(len(temperatures)):
new[i] = burnman.eos.debye.heat_capacity_v(temperatures[i], Debye_T, 1.0)
time_new = time.clock() - start
print("error %e" % np.linalg.norm((old - new) / new))
print("time old %g, time new %g" % (time_old, time_new))
temperatures = np.linspace(0, 5000, 200)
vibrational_energy = np.empty_like(temperatures)
heat_capacity = np.empty_like(temperatures)
Debye_T = 1000.
for i in range(len(temperatures)):
vibrational_energy[i] = burnman.eos.debye.thermal_energy(
temperatures[i], Debye_T, 1.0)
heat_capacity[i] = burnman.eos.debye.heat_capacity_v(
temperatures[i], Debye_T, 1.0)
plt.subplot(121)
plt.plot(temperatures, vibrational_energy)
plt.subplot(122)
plt.plot(temperatures, heat_capacity)
plt.show()
| gpl-2.0 |
akionakamura/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
uncertainty_index = uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
dsullivan7/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrate one single run of the ``MiniBatchKMeans``
estimator using a ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of run (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
FilipDominec/python-meep-utils | colormaps.py | 28 | 50518 | # New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt,
# and (in the case of viridis) Eric Firing.
#
# This file and the colormaps in it are released under the CC0 license /
# public domain dedication. We would appreciate credit if you use or
# redistribute these colormaps, but do not impose any legal restrictions.
#
# To the extent possible under law, the persons who associated CC0 with
# mpl-colormaps have waived all copyright and related or neighboring rights
# to mpl-colormaps.
#
# You should have received a copy of the CC0 legalcode along with this
# work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
__all__ = ['magma', 'inferno', 'plasma', 'viridis']
_magma_data = [[0.001462, 0.000466, 0.013866],
[0.002258, 0.001295, 0.018331],
[0.003279, 0.002305, 0.023708],
[0.004512, 0.003490, 0.029965],
[0.005950, 0.004843, 0.037130],
[0.007588, 0.006356, 0.044973],
[0.009426, 0.008022, 0.052844],
[0.011465, 0.009828, 0.060750],
[0.013708, 0.011771, 0.068667],
[0.016156, 0.013840, 0.076603],
[0.018815, 0.016026, 0.084584],
[0.021692, 0.018320, 0.092610],
[0.024792, 0.020715, 0.100676],
[0.028123, 0.023201, 0.108787],
[0.031696, 0.025765, 0.116965],
[0.035520, 0.028397, 0.125209],
[0.039608, 0.031090, 0.133515],
[0.043830, 0.033830, 0.141886],
[0.048062, 0.036607, 0.150327],
[0.052320, 0.039407, 0.158841],
[0.056615, 0.042160, 0.167446],
[0.060949, 0.044794, 0.176129],
[0.065330, 0.047318, 0.184892],
[0.069764, 0.049726, 0.193735],
[0.074257, 0.052017, 0.202660],
[0.078815, 0.054184, 0.211667],
[0.083446, 0.056225, 0.220755],
[0.088155, 0.058133, 0.229922],
[0.092949, 0.059904, 0.239164],
[0.097833, 0.061531, 0.248477],
[0.102815, 0.063010, 0.257854],
[0.107899, 0.064335, 0.267289],
[0.113094, 0.065492, 0.276784],
[0.118405, 0.066479, 0.286321],
[0.123833, 0.067295, 0.295879],
[0.129380, 0.067935, 0.305443],
[0.135053, 0.068391, 0.315000],
[0.140858, 0.068654, 0.324538],
[0.146785, 0.068738, 0.334011],
[0.152839, 0.068637, 0.343404],
[0.159018, 0.068354, 0.352688],
[0.165308, 0.067911, 0.361816],
[0.171713, 0.067305, 0.370771],
[0.178212, 0.066576, 0.379497],
[0.184801, 0.065732, 0.387973],
[0.191460, 0.064818, 0.396152],
[0.198177, 0.063862, 0.404009],
[0.204935, 0.062907, 0.411514],
[0.211718, 0.061992, 0.418647],
[0.218512, 0.061158, 0.425392],
[0.225302, 0.060445, 0.431742],
[0.232077, 0.059889, 0.437695],
[0.238826, 0.059517, 0.443256],
[0.245543, 0.059352, 0.448436],
[0.252220, 0.059415, 0.453248],
[0.258857, 0.059706, 0.457710],
[0.265447, 0.060237, 0.461840],
[0.271994, 0.060994, 0.465660],
[0.278493, 0.061978, 0.469190],
[0.284951, 0.063168, 0.472451],
[0.291366, 0.064553, 0.475462],
[0.297740, 0.066117, 0.478243],
[0.304081, 0.067835, 0.480812],
[0.310382, 0.069702, 0.483186],
[0.316654, 0.071690, 0.485380],
[0.322899, 0.073782, 0.487408],
[0.329114, 0.075972, 0.489287],
[0.335308, 0.078236, 0.491024],
[0.341482, 0.080564, 0.492631],
[0.347636, 0.082946, 0.494121],
[0.353773, 0.085373, 0.495501],
[0.359898, 0.087831, 0.496778],
[0.366012, 0.090314, 0.497960],
[0.372116, 0.092816, 0.499053],
[0.378211, 0.095332, 0.500067],
[0.384299, 0.097855, 0.501002],
[0.390384, 0.100379, 0.501864],
[0.396467, 0.102902, 0.502658],
[0.402548, 0.105420, 0.503386],
[0.408629, 0.107930, 0.504052],
[0.414709, 0.110431, 0.504662],
[0.420791, 0.112920, 0.505215],
[0.426877, 0.115395, 0.505714],
[0.432967, 0.117855, 0.506160],
[0.439062, 0.120298, 0.506555],
[0.445163, 0.122724, 0.506901],
[0.451271, 0.125132, 0.507198],
[0.457386, 0.127522, 0.507448],
[0.463508, 0.129893, 0.507652],
[0.469640, 0.132245, 0.507809],
[0.475780, 0.134577, 0.507921],
[0.481929, 0.136891, 0.507989],
[0.488088, 0.139186, 0.508011],
[0.494258, 0.141462, 0.507988],
[0.500438, 0.143719, 0.507920],
[0.506629, 0.145958, 0.507806],
[0.512831, 0.148179, 0.507648],
[0.519045, 0.150383, 0.507443],
[0.525270, 0.152569, 0.507192],
[0.531507, 0.154739, 0.506895],
[0.537755, 0.156894, 0.506551],
[0.544015, 0.159033, 0.506159],
[0.550287, 0.161158, 0.505719],
[0.556571, 0.163269, 0.505230],
[0.562866, 0.165368, 0.504692],
[0.569172, 0.167454, 0.504105],
[0.575490, 0.169530, 0.503466],
[0.581819, 0.171596, 0.502777],
[0.588158, 0.173652, 0.502035],
[0.594508, 0.175701, 0.501241],
[0.600868, 0.177743, 0.500394],
[0.607238, 0.179779, 0.499492],
[0.613617, 0.181811, 0.498536],
[0.620005, 0.183840, 0.497524],
[0.626401, 0.185867, 0.496456],
[0.632805, 0.187893, 0.495332],
[0.639216, 0.189921, 0.494150],
[0.645633, 0.191952, 0.492910],
[0.652056, 0.193986, 0.491611],
[0.658483, 0.196027, 0.490253],
[0.664915, 0.198075, 0.488836],
[0.671349, 0.200133, 0.487358],
[0.677786, 0.202203, 0.485819],
[0.684224, 0.204286, 0.484219],
[0.690661, 0.206384, 0.482558],
[0.697098, 0.208501, 0.480835],
[0.703532, 0.210638, 0.479049],
[0.709962, 0.212797, 0.477201],
[0.716387, 0.214982, 0.475290],
[0.722805, 0.217194, 0.473316],
[0.729216, 0.219437, 0.471279],
[0.735616, 0.221713, 0.469180],
[0.742004, 0.224025, 0.467018],
[0.748378, 0.226377, 0.464794],
[0.754737, 0.228772, 0.462509],
[0.761077, 0.231214, 0.460162],
[0.767398, 0.233705, 0.457755],
[0.773695, 0.236249, 0.455289],
[0.779968, 0.238851, 0.452765],
[0.786212, 0.241514, 0.450184],
[0.792427, 0.244242, 0.447543],
[0.798608, 0.247040, 0.444848],
[0.804752, 0.249911, 0.442102],
[0.810855, 0.252861, 0.439305],
[0.816914, 0.255895, 0.436461],
[0.822926, 0.259016, 0.433573],
[0.828886, 0.262229, 0.430644],
[0.834791, 0.265540, 0.427671],
[0.840636, 0.268953, 0.424666],
[0.846416, 0.272473, 0.421631],
[0.852126, 0.276106, 0.418573],
[0.857763, 0.279857, 0.415496],
[0.863320, 0.283729, 0.412403],
[0.868793, 0.287728, 0.409303],
[0.874176, 0.291859, 0.406205],
[0.879464, 0.296125, 0.403118],
[0.884651, 0.300530, 0.400047],
[0.889731, 0.305079, 0.397002],
[0.894700, 0.309773, 0.393995],
[0.899552, 0.314616, 0.391037],
[0.904281, 0.319610, 0.388137],
[0.908884, 0.324755, 0.385308],
[0.913354, 0.330052, 0.382563],
[0.917689, 0.335500, 0.379915],
[0.921884, 0.341098, 0.377376],
[0.925937, 0.346844, 0.374959],
[0.929845, 0.352734, 0.372677],
[0.933606, 0.358764, 0.370541],
[0.937221, 0.364929, 0.368567],
[0.940687, 0.371224, 0.366762],
[0.944006, 0.377643, 0.365136],
[0.947180, 0.384178, 0.363701],
[0.950210, 0.390820, 0.362468],
[0.953099, 0.397563, 0.361438],
[0.955849, 0.404400, 0.360619],
[0.958464, 0.411324, 0.360014],
[0.960949, 0.418323, 0.359630],
[0.963310, 0.425390, 0.359469],
[0.965549, 0.432519, 0.359529],
[0.967671, 0.439703, 0.359810],
[0.969680, 0.446936, 0.360311],
[0.971582, 0.454210, 0.361030],
[0.973381, 0.461520, 0.361965],
[0.975082, 0.468861, 0.363111],
[0.976690, 0.476226, 0.364466],
[0.978210, 0.483612, 0.366025],
[0.979645, 0.491014, 0.367783],
[0.981000, 0.498428, 0.369734],
[0.982279, 0.505851, 0.371874],
[0.983485, 0.513280, 0.374198],
[0.984622, 0.520713, 0.376698],
[0.985693, 0.528148, 0.379371],
[0.986700, 0.535582, 0.382210],
[0.987646, 0.543015, 0.385210],
[0.988533, 0.550446, 0.388365],
[0.989363, 0.557873, 0.391671],
[0.990138, 0.565296, 0.395122],
[0.990871, 0.572706, 0.398714],
[0.991558, 0.580107, 0.402441],
[0.992196, 0.587502, 0.406299],
[0.992785, 0.594891, 0.410283],
[0.993326, 0.602275, 0.414390],
[0.993834, 0.609644, 0.418613],
[0.994309, 0.616999, 0.422950],
[0.994738, 0.624350, 0.427397],
[0.995122, 0.631696, 0.431951],
[0.995480, 0.639027, 0.436607],
[0.995810, 0.646344, 0.441361],
[0.996096, 0.653659, 0.446213],
[0.996341, 0.660969, 0.451160],
[0.996580, 0.668256, 0.456192],
[0.996775, 0.675541, 0.461314],
[0.996925, 0.682828, 0.466526],
[0.997077, 0.690088, 0.471811],
[0.997186, 0.697349, 0.477182],
[0.997254, 0.704611, 0.482635],
[0.997325, 0.711848, 0.488154],
[0.997351, 0.719089, 0.493755],
[0.997351, 0.726324, 0.499428],
[0.997341, 0.733545, 0.505167],
[0.997285, 0.740772, 0.510983],
[0.997228, 0.747981, 0.516859],
[0.997138, 0.755190, 0.522806],
[0.997019, 0.762398, 0.528821],
[0.996898, 0.769591, 0.534892],
[0.996727, 0.776795, 0.541039],
[0.996571, 0.783977, 0.547233],
[0.996369, 0.791167, 0.553499],
[0.996162, 0.798348, 0.559820],
[0.995932, 0.805527, 0.566202],
[0.995680, 0.812706, 0.572645],
[0.995424, 0.819875, 0.579140],
[0.995131, 0.827052, 0.585701],
[0.994851, 0.834213, 0.592307],
[0.994524, 0.841387, 0.598983],
[0.994222, 0.848540, 0.605696],
[0.993866, 0.855711, 0.612482],
[0.993545, 0.862859, 0.619299],
[0.993170, 0.870024, 0.626189],
[0.992831, 0.877168, 0.633109],
[0.992440, 0.884330, 0.640099],
[0.992089, 0.891470, 0.647116],
[0.991688, 0.898627, 0.654202],
[0.991332, 0.905763, 0.661309],
[0.990930, 0.912915, 0.668481],
[0.990570, 0.920049, 0.675675],
[0.990175, 0.927196, 0.682926],
[0.989815, 0.934329, 0.690198],
[0.989434, 0.941470, 0.697519],
[0.989077, 0.948604, 0.704863],
[0.988717, 0.955742, 0.712242],
[0.988367, 0.962878, 0.719649],
[0.988033, 0.970012, 0.727077],
[0.987691, 0.977154, 0.734536],
[0.987387, 0.984288, 0.742002],
[0.987053, 0.991438, 0.749504]]
_inferno_data = [[0.001462, 0.000466, 0.013866],
[0.002267, 0.001270, 0.018570],
[0.003299, 0.002249, 0.024239],
[0.004547, 0.003392, 0.030909],
[0.006006, 0.004692, 0.038558],
[0.007676, 0.006136, 0.046836],
[0.009561, 0.007713, 0.055143],
[0.011663, 0.009417, 0.063460],
[0.013995, 0.011225, 0.071862],
[0.016561, 0.013136, 0.080282],
[0.019373, 0.015133, 0.088767],
[0.022447, 0.017199, 0.097327],
[0.025793, 0.019331, 0.105930],
[0.029432, 0.021503, 0.114621],
[0.033385, 0.023702, 0.123397],
[0.037668, 0.025921, 0.132232],
[0.042253, 0.028139, 0.141141],
[0.046915, 0.030324, 0.150164],
[0.051644, 0.032474, 0.159254],
[0.056449, 0.034569, 0.168414],
[0.061340, 0.036590, 0.177642],
[0.066331, 0.038504, 0.186962],
[0.071429, 0.040294, 0.196354],
[0.076637, 0.041905, 0.205799],
[0.081962, 0.043328, 0.215289],
[0.087411, 0.044556, 0.224813],
[0.092990, 0.045583, 0.234358],
[0.098702, 0.046402, 0.243904],
[0.104551, 0.047008, 0.253430],
[0.110536, 0.047399, 0.262912],
[0.116656, 0.047574, 0.272321],
[0.122908, 0.047536, 0.281624],
[0.129285, 0.047293, 0.290788],
[0.135778, 0.046856, 0.299776],
[0.142378, 0.046242, 0.308553],
[0.149073, 0.045468, 0.317085],
[0.155850, 0.044559, 0.325338],
[0.162689, 0.043554, 0.333277],
[0.169575, 0.042489, 0.340874],
[0.176493, 0.041402, 0.348111],
[0.183429, 0.040329, 0.354971],
[0.190367, 0.039309, 0.361447],
[0.197297, 0.038400, 0.367535],
[0.204209, 0.037632, 0.373238],
[0.211095, 0.037030, 0.378563],
[0.217949, 0.036615, 0.383522],
[0.224763, 0.036405, 0.388129],
[0.231538, 0.036405, 0.392400],
[0.238273, 0.036621, 0.396353],
[0.244967, 0.037055, 0.400007],
[0.251620, 0.037705, 0.403378],
[0.258234, 0.038571, 0.406485],
[0.264810, 0.039647, 0.409345],
[0.271347, 0.040922, 0.411976],
[0.277850, 0.042353, 0.414392],
[0.284321, 0.043933, 0.416608],
[0.290763, 0.045644, 0.418637],
[0.297178, 0.047470, 0.420491],
[0.303568, 0.049396, 0.422182],
[0.309935, 0.051407, 0.423721],
[0.316282, 0.053490, 0.425116],
[0.322610, 0.055634, 0.426377],
[0.328921, 0.057827, 0.427511],
[0.335217, 0.060060, 0.428524],
[0.341500, 0.062325, 0.429425],
[0.347771, 0.064616, 0.430217],
[0.354032, 0.066925, 0.430906],
[0.360284, 0.069247, 0.431497],
[0.366529, 0.071579, 0.431994],
[0.372768, 0.073915, 0.432400],
[0.379001, 0.076253, 0.432719],
[0.385228, 0.078591, 0.432955],
[0.391453, 0.080927, 0.433109],
[0.397674, 0.083257, 0.433183],
[0.403894, 0.085580, 0.433179],
[0.410113, 0.087896, 0.433098],
[0.416331, 0.090203, 0.432943],
[0.422549, 0.092501, 0.432714],
[0.428768, 0.094790, 0.432412],
[0.434987, 0.097069, 0.432039],
[0.441207, 0.099338, 0.431594],
[0.447428, 0.101597, 0.431080],
[0.453651, 0.103848, 0.430498],
[0.459875, 0.106089, 0.429846],
[0.466100, 0.108322, 0.429125],
[0.472328, 0.110547, 0.428334],
[0.478558, 0.112764, 0.427475],
[0.484789, 0.114974, 0.426548],
[0.491022, 0.117179, 0.425552],
[0.497257, 0.119379, 0.424488],
[0.503493, 0.121575, 0.423356],
[0.509730, 0.123769, 0.422156],
[0.515967, 0.125960, 0.420887],
[0.522206, 0.128150, 0.419549],
[0.528444, 0.130341, 0.418142],
[0.534683, 0.132534, 0.416667],
[0.540920, 0.134729, 0.415123],
[0.547157, 0.136929, 0.413511],
[0.553392, 0.139134, 0.411829],
[0.559624, 0.141346, 0.410078],
[0.565854, 0.143567, 0.408258],
[0.572081, 0.145797, 0.406369],
[0.578304, 0.148039, 0.404411],
[0.584521, 0.150294, 0.402385],
[0.590734, 0.152563, 0.400290],
[0.596940, 0.154848, 0.398125],
[0.603139, 0.157151, 0.395891],
[0.609330, 0.159474, 0.393589],
[0.615513, 0.161817, 0.391219],
[0.621685, 0.164184, 0.388781],
[0.627847, 0.166575, 0.386276],
[0.633998, 0.168992, 0.383704],
[0.640135, 0.171438, 0.381065],
[0.646260, 0.173914, 0.378359],
[0.652369, 0.176421, 0.375586],
[0.658463, 0.178962, 0.372748],
[0.664540, 0.181539, 0.369846],
[0.670599, 0.184153, 0.366879],
[0.676638, 0.186807, 0.363849],
[0.682656, 0.189501, 0.360757],
[0.688653, 0.192239, 0.357603],
[0.694627, 0.195021, 0.354388],
[0.700576, 0.197851, 0.351113],
[0.706500, 0.200728, 0.347777],
[0.712396, 0.203656, 0.344383],
[0.718264, 0.206636, 0.340931],
[0.724103, 0.209670, 0.337424],
[0.729909, 0.212759, 0.333861],
[0.735683, 0.215906, 0.330245],
[0.741423, 0.219112, 0.326576],
[0.747127, 0.222378, 0.322856],
[0.752794, 0.225706, 0.319085],
[0.758422, 0.229097, 0.315266],
[0.764010, 0.232554, 0.311399],
[0.769556, 0.236077, 0.307485],
[0.775059, 0.239667, 0.303526],
[0.780517, 0.243327, 0.299523],
[0.785929, 0.247056, 0.295477],
[0.791293, 0.250856, 0.291390],
[0.796607, 0.254728, 0.287264],
[0.801871, 0.258674, 0.283099],
[0.807082, 0.262692, 0.278898],
[0.812239, 0.266786, 0.274661],
[0.817341, 0.270954, 0.270390],
[0.822386, 0.275197, 0.266085],
[0.827372, 0.279517, 0.261750],
[0.832299, 0.283913, 0.257383],
[0.837165, 0.288385, 0.252988],
[0.841969, 0.292933, 0.248564],
[0.846709, 0.297559, 0.244113],
[0.851384, 0.302260, 0.239636],
[0.855992, 0.307038, 0.235133],
[0.860533, 0.311892, 0.230606],
[0.865006, 0.316822, 0.226055],
[0.869409, 0.321827, 0.221482],
[0.873741, 0.326906, 0.216886],
[0.878001, 0.332060, 0.212268],
[0.882188, 0.337287, 0.207628],
[0.886302, 0.342586, 0.202968],
[0.890341, 0.347957, 0.198286],
[0.894305, 0.353399, 0.193584],
[0.898192, 0.358911, 0.188860],
[0.902003, 0.364492, 0.184116],
[0.905735, 0.370140, 0.179350],
[0.909390, 0.375856, 0.174563],
[0.912966, 0.381636, 0.169755],
[0.916462, 0.387481, 0.164924],
[0.919879, 0.393389, 0.160070],
[0.923215, 0.399359, 0.155193],
[0.926470, 0.405389, 0.150292],
[0.929644, 0.411479, 0.145367],
[0.932737, 0.417627, 0.140417],
[0.935747, 0.423831, 0.135440],
[0.938675, 0.430091, 0.130438],
[0.941521, 0.436405, 0.125409],
[0.944285, 0.442772, 0.120354],
[0.946965, 0.449191, 0.115272],
[0.949562, 0.455660, 0.110164],
[0.952075, 0.462178, 0.105031],
[0.954506, 0.468744, 0.099874],
[0.956852, 0.475356, 0.094695],
[0.959114, 0.482014, 0.089499],
[0.961293, 0.488716, 0.084289],
[0.963387, 0.495462, 0.079073],
[0.965397, 0.502249, 0.073859],
[0.967322, 0.509078, 0.068659],
[0.969163, 0.515946, 0.063488],
[0.970919, 0.522853, 0.058367],
[0.972590, 0.529798, 0.053324],
[0.974176, 0.536780, 0.048392],
[0.975677, 0.543798, 0.043618],
[0.977092, 0.550850, 0.039050],
[0.978422, 0.557937, 0.034931],
[0.979666, 0.565057, 0.031409],
[0.980824, 0.572209, 0.028508],
[0.981895, 0.579392, 0.026250],
[0.982881, 0.586606, 0.024661],
[0.983779, 0.593849, 0.023770],
[0.984591, 0.601122, 0.023606],
[0.985315, 0.608422, 0.024202],
[0.985952, 0.615750, 0.025592],
[0.986502, 0.623105, 0.027814],
[0.986964, 0.630485, 0.030908],
[0.987337, 0.637890, 0.034916],
[0.987622, 0.645320, 0.039886],
[0.987819, 0.652773, 0.045581],
[0.987926, 0.660250, 0.051750],
[0.987945, 0.667748, 0.058329],
[0.987874, 0.675267, 0.065257],
[0.987714, 0.682807, 0.072489],
[0.987464, 0.690366, 0.079990],
[0.987124, 0.697944, 0.087731],
[0.986694, 0.705540, 0.095694],
[0.986175, 0.713153, 0.103863],
[0.985566, 0.720782, 0.112229],
[0.984865, 0.728427, 0.120785],
[0.984075, 0.736087, 0.129527],
[0.983196, 0.743758, 0.138453],
[0.982228, 0.751442, 0.147565],
[0.981173, 0.759135, 0.156863],
[0.980032, 0.766837, 0.166353],
[0.978806, 0.774545, 0.176037],
[0.977497, 0.782258, 0.185923],
[0.976108, 0.789974, 0.196018],
[0.974638, 0.797692, 0.206332],
[0.973088, 0.805409, 0.216877],
[0.971468, 0.813122, 0.227658],
[0.969783, 0.820825, 0.238686],
[0.968041, 0.828515, 0.249972],
[0.966243, 0.836191, 0.261534],
[0.964394, 0.843848, 0.273391],
[0.962517, 0.851476, 0.285546],
[0.960626, 0.859069, 0.298010],
[0.958720, 0.866624, 0.310820],
[0.956834, 0.874129, 0.323974],
[0.954997, 0.881569, 0.337475],
[0.953215, 0.888942, 0.351369],
[0.951546, 0.896226, 0.365627],
[0.950018, 0.903409, 0.380271],
[0.948683, 0.910473, 0.395289],
[0.947594, 0.917399, 0.410665],
[0.946809, 0.924168, 0.426373],
[0.946392, 0.930761, 0.442367],
[0.946403, 0.937159, 0.458592],
[0.946903, 0.943348, 0.474970],
[0.947937, 0.949318, 0.491426],
[0.949545, 0.955063, 0.507860],
[0.951740, 0.960587, 0.524203],
[0.954529, 0.965896, 0.540361],
[0.957896, 0.971003, 0.556275],
[0.961812, 0.975924, 0.571925],
[0.966249, 0.980678, 0.587206],
[0.971162, 0.985282, 0.602154],
[0.976511, 0.989753, 0.616760],
[0.982257, 0.994109, 0.631017],
[0.988362, 0.998364, 0.644924]]
_plasma_data = [[0.050383, 0.029803, 0.527975],
[0.063536, 0.028426, 0.533124],
[0.075353, 0.027206, 0.538007],
[0.086222, 0.026125, 0.542658],
[0.096379, 0.025165, 0.547103],
[0.105980, 0.024309, 0.551368],
[0.115124, 0.023556, 0.555468],
[0.123903, 0.022878, 0.559423],
[0.132381, 0.022258, 0.563250],
[0.140603, 0.021687, 0.566959],
[0.148607, 0.021154, 0.570562],
[0.156421, 0.020651, 0.574065],
[0.164070, 0.020171, 0.577478],
[0.171574, 0.019706, 0.580806],
[0.178950, 0.019252, 0.584054],
[0.186213, 0.018803, 0.587228],
[0.193374, 0.018354, 0.590330],
[0.200445, 0.017902, 0.593364],
[0.207435, 0.017442, 0.596333],
[0.214350, 0.016973, 0.599239],
[0.221197, 0.016497, 0.602083],
[0.227983, 0.016007, 0.604867],
[0.234715, 0.015502, 0.607592],
[0.241396, 0.014979, 0.610259],
[0.248032, 0.014439, 0.612868],
[0.254627, 0.013882, 0.615419],
[0.261183, 0.013308, 0.617911],
[0.267703, 0.012716, 0.620346],
[0.274191, 0.012109, 0.622722],
[0.280648, 0.011488, 0.625038],
[0.287076, 0.010855, 0.627295],
[0.293478, 0.010213, 0.629490],
[0.299855, 0.009561, 0.631624],
[0.306210, 0.008902, 0.633694],
[0.312543, 0.008239, 0.635700],
[0.318856, 0.007576, 0.637640],
[0.325150, 0.006915, 0.639512],
[0.331426, 0.006261, 0.641316],
[0.337683, 0.005618, 0.643049],
[0.343925, 0.004991, 0.644710],
[0.350150, 0.004382, 0.646298],
[0.356359, 0.003798, 0.647810],
[0.362553, 0.003243, 0.649245],
[0.368733, 0.002724, 0.650601],
[0.374897, 0.002245, 0.651876],
[0.381047, 0.001814, 0.653068],
[0.387183, 0.001434, 0.654177],
[0.393304, 0.001114, 0.655199],
[0.399411, 0.000859, 0.656133],
[0.405503, 0.000678, 0.656977],
[0.411580, 0.000577, 0.657730],
[0.417642, 0.000564, 0.658390],
[0.423689, 0.000646, 0.658956],
[0.429719, 0.000831, 0.659425],
[0.435734, 0.001127, 0.659797],
[0.441732, 0.001540, 0.660069],
[0.447714, 0.002080, 0.660240],
[0.453677, 0.002755, 0.660310],
[0.459623, 0.003574, 0.660277],
[0.465550, 0.004545, 0.660139],
[0.471457, 0.005678, 0.659897],
[0.477344, 0.006980, 0.659549],
[0.483210, 0.008460, 0.659095],
[0.489055, 0.010127, 0.658534],
[0.494877, 0.011990, 0.657865],
[0.500678, 0.014055, 0.657088],
[0.506454, 0.016333, 0.656202],
[0.512206, 0.018833, 0.655209],
[0.517933, 0.021563, 0.654109],
[0.523633, 0.024532, 0.652901],
[0.529306, 0.027747, 0.651586],
[0.534952, 0.031217, 0.650165],
[0.540570, 0.034950, 0.648640],
[0.546157, 0.038954, 0.647010],
[0.551715, 0.043136, 0.645277],
[0.557243, 0.047331, 0.643443],
[0.562738, 0.051545, 0.641509],
[0.568201, 0.055778, 0.639477],
[0.573632, 0.060028, 0.637349],
[0.579029, 0.064296, 0.635126],
[0.584391, 0.068579, 0.632812],
[0.589719, 0.072878, 0.630408],
[0.595011, 0.077190, 0.627917],
[0.600266, 0.081516, 0.625342],
[0.605485, 0.085854, 0.622686],
[0.610667, 0.090204, 0.619951],
[0.615812, 0.094564, 0.617140],
[0.620919, 0.098934, 0.614257],
[0.625987, 0.103312, 0.611305],
[0.631017, 0.107699, 0.608287],
[0.636008, 0.112092, 0.605205],
[0.640959, 0.116492, 0.602065],
[0.645872, 0.120898, 0.598867],
[0.650746, 0.125309, 0.595617],
[0.655580, 0.129725, 0.592317],
[0.660374, 0.134144, 0.588971],
[0.665129, 0.138566, 0.585582],
[0.669845, 0.142992, 0.582154],
[0.674522, 0.147419, 0.578688],
[0.679160, 0.151848, 0.575189],
[0.683758, 0.156278, 0.571660],
[0.688318, 0.160709, 0.568103],
[0.692840, 0.165141, 0.564522],
[0.697324, 0.169573, 0.560919],
[0.701769, 0.174005, 0.557296],
[0.706178, 0.178437, 0.553657],
[0.710549, 0.182868, 0.550004],
[0.714883, 0.187299, 0.546338],
[0.719181, 0.191729, 0.542663],
[0.723444, 0.196158, 0.538981],
[0.727670, 0.200586, 0.535293],
[0.731862, 0.205013, 0.531601],
[0.736019, 0.209439, 0.527908],
[0.740143, 0.213864, 0.524216],
[0.744232, 0.218288, 0.520524],
[0.748289, 0.222711, 0.516834],
[0.752312, 0.227133, 0.513149],
[0.756304, 0.231555, 0.509468],
[0.760264, 0.235976, 0.505794],
[0.764193, 0.240396, 0.502126],
[0.768090, 0.244817, 0.498465],
[0.771958, 0.249237, 0.494813],
[0.775796, 0.253658, 0.491171],
[0.779604, 0.258078, 0.487539],
[0.783383, 0.262500, 0.483918],
[0.787133, 0.266922, 0.480307],
[0.790855, 0.271345, 0.476706],
[0.794549, 0.275770, 0.473117],
[0.798216, 0.280197, 0.469538],
[0.801855, 0.284626, 0.465971],
[0.805467, 0.289057, 0.462415],
[0.809052, 0.293491, 0.458870],
[0.812612, 0.297928, 0.455338],
[0.816144, 0.302368, 0.451816],
[0.819651, 0.306812, 0.448306],
[0.823132, 0.311261, 0.444806],
[0.826588, 0.315714, 0.441316],
[0.830018, 0.320172, 0.437836],
[0.833422, 0.324635, 0.434366],
[0.836801, 0.329105, 0.430905],
[0.840155, 0.333580, 0.427455],
[0.843484, 0.338062, 0.424013],
[0.846788, 0.342551, 0.420579],
[0.850066, 0.347048, 0.417153],
[0.853319, 0.351553, 0.413734],
[0.856547, 0.356066, 0.410322],
[0.859750, 0.360588, 0.406917],
[0.862927, 0.365119, 0.403519],
[0.866078, 0.369660, 0.400126],
[0.869203, 0.374212, 0.396738],
[0.872303, 0.378774, 0.393355],
[0.875376, 0.383347, 0.389976],
[0.878423, 0.387932, 0.386600],
[0.881443, 0.392529, 0.383229],
[0.884436, 0.397139, 0.379860],
[0.887402, 0.401762, 0.376494],
[0.890340, 0.406398, 0.373130],
[0.893250, 0.411048, 0.369768],
[0.896131, 0.415712, 0.366407],
[0.898984, 0.420392, 0.363047],
[0.901807, 0.425087, 0.359688],
[0.904601, 0.429797, 0.356329],
[0.907365, 0.434524, 0.352970],
[0.910098, 0.439268, 0.349610],
[0.912800, 0.444029, 0.346251],
[0.915471, 0.448807, 0.342890],
[0.918109, 0.453603, 0.339529],
[0.920714, 0.458417, 0.336166],
[0.923287, 0.463251, 0.332801],
[0.925825, 0.468103, 0.329435],
[0.928329, 0.472975, 0.326067],
[0.930798, 0.477867, 0.322697],
[0.933232, 0.482780, 0.319325],
[0.935630, 0.487712, 0.315952],
[0.937990, 0.492667, 0.312575],
[0.940313, 0.497642, 0.309197],
[0.942598, 0.502639, 0.305816],
[0.944844, 0.507658, 0.302433],
[0.947051, 0.512699, 0.299049],
[0.949217, 0.517763, 0.295662],
[0.951344, 0.522850, 0.292275],
[0.953428, 0.527960, 0.288883],
[0.955470, 0.533093, 0.285490],
[0.957469, 0.538250, 0.282096],
[0.959424, 0.543431, 0.278701],
[0.961336, 0.548636, 0.275305],
[0.963203, 0.553865, 0.271909],
[0.965024, 0.559118, 0.268513],
[0.966798, 0.564396, 0.265118],
[0.968526, 0.569700, 0.261721],
[0.970205, 0.575028, 0.258325],
[0.971835, 0.580382, 0.254931],
[0.973416, 0.585761, 0.251540],
[0.974947, 0.591165, 0.248151],
[0.976428, 0.596595, 0.244767],
[0.977856, 0.602051, 0.241387],
[0.979233, 0.607532, 0.238013],
[0.980556, 0.613039, 0.234646],
[0.981826, 0.618572, 0.231287],
[0.983041, 0.624131, 0.227937],
[0.984199, 0.629718, 0.224595],
[0.985301, 0.635330, 0.221265],
[0.986345, 0.640969, 0.217948],
[0.987332, 0.646633, 0.214648],
[0.988260, 0.652325, 0.211364],
[0.989128, 0.658043, 0.208100],
[0.989935, 0.663787, 0.204859],
[0.990681, 0.669558, 0.201642],
[0.991365, 0.675355, 0.198453],
[0.991985, 0.681179, 0.195295],
[0.992541, 0.687030, 0.192170],
[0.993032, 0.692907, 0.189084],
[0.993456, 0.698810, 0.186041],
[0.993814, 0.704741, 0.183043],
[0.994103, 0.710698, 0.180097],
[0.994324, 0.716681, 0.177208],
[0.994474, 0.722691, 0.174381],
[0.994553, 0.728728, 0.171622],
[0.994561, 0.734791, 0.168938],
[0.994495, 0.740880, 0.166335],
[0.994355, 0.746995, 0.163821],
[0.994141, 0.753137, 0.161404],
[0.993851, 0.759304, 0.159092],
[0.993482, 0.765499, 0.156891],
[0.993033, 0.771720, 0.154808],
[0.992505, 0.777967, 0.152855],
[0.991897, 0.784239, 0.151042],
[0.991209, 0.790537, 0.149377],
[0.990439, 0.796859, 0.147870],
[0.989587, 0.803205, 0.146529],
[0.988648, 0.809579, 0.145357],
[0.987621, 0.815978, 0.144363],
[0.986509, 0.822401, 0.143557],
[0.985314, 0.828846, 0.142945],
[0.984031, 0.835315, 0.142528],
[0.982653, 0.841812, 0.142303],
[0.981190, 0.848329, 0.142279],
[0.979644, 0.854866, 0.142453],
[0.977995, 0.861432, 0.142808],
[0.976265, 0.868016, 0.143351],
[0.974443, 0.874622, 0.144061],
[0.972530, 0.881250, 0.144923],
[0.970533, 0.887896, 0.145919],
[0.968443, 0.894564, 0.147014],
[0.966271, 0.901249, 0.148180],
[0.964021, 0.907950, 0.149370],
[0.961681, 0.914672, 0.150520],
[0.959276, 0.921407, 0.151566],
[0.956808, 0.928152, 0.152409],
[0.954287, 0.934908, 0.152921],
[0.951726, 0.941671, 0.152925],
[0.949151, 0.948435, 0.152178],
[0.946602, 0.955190, 0.150328],
[0.944152, 0.961916, 0.146861],
[0.941896, 0.968590, 0.140956],
[0.940015, 0.975158, 0.131326]]
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
from matplotlib.colors import ListedColormap

# Wrap each 256-entry RGB lookup table in a ListedColormap, keyed by name.
cmaps = {
    cmap_name: ListedColormap(cmap_data, name=cmap_name)
    for cmap_name, cmap_data in (
        ('magma', _magma_data),
        ('inferno', _inferno_data),
        ('plasma', _plasma_data),
        ('viridis', _viridis_data),
    )
}
magma = cmaps['magma']
inferno = cmaps['inferno']
plasma = cmaps['plasma']
viridis = cmaps['viridis']
| gpl-2.0 |
tseaver/gcloud-python | bigquery/google/cloud/bigquery/table.py | 1 | 46359 | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Tables."""
from __future__ import absolute_import
import copy
import datetime
import operator
import warnings
import six
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
from google.api_core.page_iterator import HTTPIterator
import google.cloud._helpers
from google.cloud.bigquery import _helpers
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.schema import _build_schema_resource
from google.cloud.bigquery.schema import _parse_schema_resource
from google.cloud.bigquery.external_config import ExternalConfig
# Error message used when to_dataframe() is called without pandas installed.
_NO_PANDAS_ERROR = (
    'The pandas library is not installed, please install '
    'pandas to use the to_dataframe() function.'
)
# Message used when an operation requires a schema the Table has not loaded.
_TABLE_HAS_NO_SCHEMA = 'Table has no schema: call "client.get_table()"'
# Unique sentinel distinguishing "argument not passed" from an explicit None.
_MARKER = object()
def _reference_getter(table):
    """A :class:`~google.cloud.bigquery.table.TableReference` pointing to
    this table.

    Returns:
        google.cloud.bigquery.table.TableReference: pointer to this table.
    """
    # Imported here (not at module scope) to avoid a circular import with
    # the dataset module.
    from google.cloud.bigquery import dataset

    parent = dataset.DatasetReference(table.project, table.dataset_id)
    return TableReference(parent, table.table_id)
def _view_use_legacy_sql_getter(table):
"""bool: Specifies whether to execute the view with Legacy or Standard SQL.
This boolean specifies whether to execute the view with Legacy SQL
(:data:`True`) or Standard SQL (:data:`False`). The client side default is
:data:`False`. The server-side default is :data:`True`. If this table is
not a view, :data:`None` is returned.
Raises:
ValueError: For invalid value types.
"""
view = table._properties.get('view')
if view is not None:
# The server-side default for useLegacySql is True.
return view.get('useLegacySql', True)
# In some cases, such as in a table list no view object is present, but the
# resource still represents a view. Use the type as a fallback.
if table.table_type == 'VIEW':
# The server-side default for useLegacySql is True.
return True
class EncryptionConfiguration(object):
    """Custom encryption configuration (e.g., Cloud KMS keys).

    Args:
        kms_key_name (str): resource ID of Cloud KMS key used for encryption
    """

    def __init__(self, kms_key_name=None):
        self._properties = {}
        if kms_key_name is not None:
            # Assign through the property setter.
            self.kms_key_name = kms_key_name

    @property
    def kms_key_name(self):
        """str: Resource ID of Cloud KMS key

        Resource ID of Cloud KMS key or :data:`None` if using default
        encryption.
        """
        return self._properties.get('kmsKeyName')

    @kms_key_name.setter
    def kms_key_name(self, value):
        self._properties['kmsKeyName'] = value

    @classmethod
    def from_api_repr(cls, resource):
        """Construct an encryption configuration from its API representation

        Args:
            resource (Dict[str, object]):
                An encryption configuration representation as returned from
                the API.

        Returns:
            google.cloud.bigquery.table.EncryptionConfiguration:
                An encryption configuration parsed from ``resource``.
        """
        instance = cls()
        # Deep-copy so later mutation of ``resource`` cannot leak in.
        instance._properties = copy.deepcopy(resource)
        return instance

    def to_api_repr(self):
        """Construct the API resource representation of this encryption
        configuration.

        Returns:
            Dict[str, object]:
                Encryption configuration as represented as an API resource
        """
        # Deep-copy so callers cannot mutate internal state via the result.
        return copy.deepcopy(self._properties)
class TableReference(object):
    """TableReferences are pointers to tables.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/tables

    Args:
        dataset_ref (google.cloud.bigquery.dataset.DatasetReference):
            A pointer to the dataset
        table_id (str): The ID of the table
    """

    def __init__(self, dataset_ref, table_id):
        self._project = dataset_ref.project
        self._dataset_id = dataset_ref.dataset_id
        self._table_id = table_id

    @property
    def project(self):
        """str: Project bound to the table"""
        return self._project

    @property
    def dataset_id(self):
        """str: ID of dataset containing the table."""
        return self._dataset_id

    @property
    def table_id(self):
        """str: The table ID."""
        return self._table_id

    @property
    def path(self):
        """str: URL path for the table's APIs."""
        ids = (self._project, self._dataset_id, self._table_id)
        return '/projects/%s/datasets/%s/tables/%s' % ids

    @classmethod
    def from_string(cls, full_table_id):
        """Construct a table reference from fully-qualified table ID.

        Args:
            full_table_id (str):
                A fully-qualified table ID in standard SQL format. Must
                included a project ID, dataset ID, and table ID, each
                separated by ``.``.

        Returns:
            TableReference: Table reference parsed from ``full_table_id``.

        Examples:
            >>> TableReference.from_string('my-project.mydataset.mytable')
            TableRef...(DatasetRef...('my-project', 'mydataset'), 'mytable')

        Raises:
            ValueError:
                If ``full_table_id`` is not a fully-qualified table ID in
                standard SQL format.
        """
        # Imported here to avoid a circular import with the dataset module.
        from google.cloud.bigquery.dataset import DatasetReference

        parts = full_table_id.split('.')
        if len(parts) != 3:
            raise ValueError(
                'full_table_id must be a fully-qualified table ID in '
                'standard SQL format. e.g. "project.dataset.table", got '
                '{}'.format(full_table_id))
        project, dataset_id, table_id = parts
        return cls(DatasetReference(project, dataset_id), table_id)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct a table reference given its API representation

        Args:
            resource (Dict[str, object]):
                Table reference representation returned from the API

        Returns:
            google.cloud.bigquery.table.TableReference:
                Table reference parsed from ``resource``.
        """
        from google.cloud.bigquery.dataset import DatasetReference

        dataset_ref = DatasetReference(
            resource['projectId'], resource['datasetId'])
        return cls(dataset_ref, resource['tableId'])

    def to_api_repr(self):
        """Construct the API resource representation of this table reference.

        Returns:
            Dict[str, object]: Table reference represented as an API resource
        """
        return {
            'projectId': self._project,
            'datasetId': self._dataset_id,
            'tableId': self._table_id,
        }

    def _key(self):
        """A tuple key that uniquely describes this field.

        Used to compute this instance's hashcode and evaluate equality.

        Returns:
            Tuple[str]: The contents of this :class:`DatasetReference`.
        """
        return (self._project, self._dataset_id, self._table_id)

    def __eq__(self, other):
        if isinstance(other, TableReference):
            return self._key() == other._key()
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self._key())

    def __repr__(self):
        from google.cloud.bigquery.dataset import DatasetReference

        dataset_ref = DatasetReference(self._project, self._dataset_id)
        return "TableReference({}, '{}')".format(
            repr(dataset_ref), self._table_id)
class Table(object):
"""Tables represent a set of rows whose values correspond to a schema.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables
Args:
table_ref (google.cloud.bigquery.table.TableReference):
A pointer to a table
schema (List[google.cloud.bigquery.schema.SchemaField]):
The table's schema
"""
_PROPERTY_TO_API_FIELD = {
'friendly_name': 'friendlyName',
'expires': 'expirationTime',
'time_partitioning': 'timePartitioning',
'partitioning_type': 'timePartitioning',
'partition_expiration': 'timePartitioning',
'view_use_legacy_sql': 'view',
'view_query': 'view',
'external_data_configuration': 'externalDataConfiguration',
'encryption_configuration': 'encryptionConfiguration',
}
def __init__(self, table_ref, schema=None):
self._properties = {
'tableReference': table_ref.to_api_repr(),
'labels': {},
}
# Let the @property do validation.
if schema is not None:
self.schema = schema
@property
def project(self):
"""str: Project bound to the table."""
return self._properties['tableReference']['projectId']
@property
def dataset_id(self):
"""str: ID of dataset containing the table."""
return self._properties['tableReference']['datasetId']
@property
def table_id(self):
"""str: ID of the table."""
return self._properties['tableReference']['tableId']
reference = property(_reference_getter)
@property
def path(self):
"""str: URL path for the table's APIs."""
return '/projects/%s/datasets/%s/tables/%s' % (
self.project, self.dataset_id, self.table_id)
@property
def schema(self):
"""List[google.cloud.bigquery.schema.SchemaField]: Table's schema.
Raises:
TypeError: If 'value' is not a sequence
ValueError:
If any item in the sequence is not a
:class:`~google.cloud.bigquery.schema.SchemaField`
"""
prop = self._properties.get('schema')
if not prop:
return []
else:
return _parse_schema_resource(prop)
@schema.setter
def schema(self, value):
if value is None:
self._properties['schema'] = None
elif not all(isinstance(field, SchemaField) for field in value):
raise ValueError('Schema items must be fields')
else:
self._properties['schema'] = {
'fields': _build_schema_resource(value)
}
@property
def labels(self):
"""Dict[str, str]: Labels for the table.
This method always returns a dict. To change a table's labels,
modify the dict, then call ``Client.update_table``. To delete a
label, set its value to :data:`None` before updating.
Raises:
ValueError: If ``value`` type is invalid.
"""
return self._properties.setdefault('labels', {})
@labels.setter
def labels(self, value):
if not isinstance(value, dict):
raise ValueError("Pass a dict")
self._properties['labels'] = value
@property
def encryption_configuration(self):
"""google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the table.
Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
if using default encryption.
See `protecting data with Cloud KMS keys
<https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_
in the BigQuery documentation.
"""
prop = self._properties.get('encryptionConfiguration')
if prop is not None:
prop = EncryptionConfiguration.from_api_repr(prop)
return prop
@encryption_configuration.setter
def encryption_configuration(self, value):
api_repr = value
if value is not None:
api_repr = value.to_api_repr()
self._properties['encryptionConfiguration'] = api_repr
@property
def created(self):
"""Union[datetime.datetime, None]: Datetime at which the table was
created (:data:`None` until set from the server).
"""
creation_time = self._properties.get('creationTime')
if creation_time is not None:
# creation_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(creation_time))
    @property
    def etag(self):
        """Union[str, None]: ETag for the table resource (:data:`None` until
        set from the server).
        """
        # Populated from the API response; absent until then.
        return self._properties.get('etag')
@property
def modified(self):
"""Union[datetime.datetime, None]: Datetime at which the table was last
modified (:data:`None` until set from the server).
"""
modified_time = self._properties.get('lastModifiedTime')
if modified_time is not None:
# modified_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(modified_time))
    @property
    def num_bytes(self):
        """Union[int, None]: The size of the table in bytes (:data:`None` until
        set from the server).
        """
        # The API returns numeric fields as JSON strings; coerce to int.
        return _helpers._int_or_none(self._properties.get('numBytes'))
    @property
    def num_rows(self):
        """Union[int, None]: The number of rows in the table (:data:`None`
        until set from the server).
        """
        # The API returns numeric fields as JSON strings; coerce to int.
        return _helpers._int_or_none(self._properties.get('numRows'))
    @property
    def self_link(self):
        """Union[str, None]: URL for the table resource (:data:`None` until set
        from the server).
        """
        # Read-only: no setter is defined for this property.
        return self._properties.get('selfLink')
    @property
    def full_table_id(self):
        """Union[str, None]: ID for the table (:data:`None` until set from the
        server).
        In the format ``project_id:dataset_id.table_id``.
        """
        # Server-assigned; note the colon/dot format differs from the
        # standard-SQL dotted form used by TableReference.from_string().
        return self._properties.get('id')
    @property
    def table_type(self):
        """Union[str, None]: The type of the table (:data:`None` until set from
        the server).
        Possible values are ``'TABLE'``, ``'VIEW'``, or ``'EXTERNAL'``.
        """
        # Read-only: no setter is defined for this property.
        return self._properties.get('type')
@property
def time_partitioning(self):
"""google.cloud.bigquery.table.TimePartitioning: Configures time-based
partitioning for a table.
Raises:
ValueError:
If the value is not :class:`TimePartitioning` or :data:`None`.
"""
prop = self._properties.get('timePartitioning')
if prop is not None:
return TimePartitioning.from_api_repr(prop)
@time_partitioning.setter
def time_partitioning(self, value):
api_repr = value
if isinstance(value, TimePartitioning):
api_repr = value.to_api_repr()
elif value is not None:
raise ValueError(
"value must be google.cloud.bigquery.table.TimePartitioning "
"or None")
self._properties['timePartitioning'] = api_repr
    @property
    def partitioning_type(self):
        """Union[str, None]: Time partitioning of the table if it is
        partitioned (Defaults to :data:`None`).
        The only partitioning type that is currently supported is
        :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`.
        """
        # Deprecated shim: delegates to the TimePartitioning object.
        warnings.warn(
            "This method will be deprecated in future versions. Please use "
            "Table.time_partitioning.type_ instead.",
            UserWarning)
        if self.time_partitioning is not None:
            return self.time_partitioning.type_
    @partitioning_type.setter
    def partitioning_type(self, value):
        warnings.warn(
            "This method will be deprecated in future versions. Please use "
            "Table.time_partitioning.type_ instead.",
            UserWarning)
        # Create an empty timePartitioning resource if none exists yet.
        if self.time_partitioning is None:
            self._properties['timePartitioning'] = {}
        self._properties['timePartitioning']['type'] = value
    @property
    def partition_expiration(self):
        """Union[int, None]: Expiration time in milliseconds for a partition.
        If :attr:`partition_expiration` is set and :attr:`type_` is
        not set, :attr:`type_` will default to
        :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`.
        """
        # Deprecated shim: delegates to the TimePartitioning object.
        warnings.warn(
            "This method will be deprecated in future versions. Please use "
            "Table.time_partitioning.expiration_ms instead.",
            UserWarning)
        if self.time_partitioning is not None:
            return self.time_partitioning.expiration_ms
    @partition_expiration.setter
    def partition_expiration(self, value):
        warnings.warn(
            "This method will be deprecated in future versions. Please use "
            "Table.time_partitioning.expiration_ms instead.",
            UserWarning)
        # Setting an expiration implies DAY partitioning when none is set.
        if self.time_partitioning is None:
            self._properties['timePartitioning'] = {
                'type': TimePartitioningType.DAY}
        # The API represents expirationMs as a string.
        self._properties['timePartitioning']['expirationMs'] = str(value)
@property
def clustering_fields(self):
"""Union[List[str], None]: Fields defining clustering for the table
(Defaults to :data:`None`).
Clustering fields are immutable after table creation.
.. note::
As of 2018-06-29, clustering fields cannot be set on a table
which does not also have time partioning defined.
"""
prop = self._properties.get('clustering')
if prop is not None:
return list(prop.get('fields', ()))
@clustering_fields.setter
def clustering_fields(self, value):
"""Union[List[str], None]: Fields defining clustering for the table
(Defaults to :data:`None`).
"""
if value is not None:
prop = self._properties.setdefault('clustering', {})
prop['fields'] = value
else:
if 'clustering' in self._properties:
del self._properties['clustering']
@property
def description(self):
"""Union[str, None]: Description of the table (defaults to
:data:`None`).
Raises:
ValueError: For invalid value types.
"""
return self._properties.get('description')
@description.setter
def description(self, value):
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties['description'] = value
    @property
    def expires(self):
        """Union[datetime.datetime, None]: Datetime at which the table will be
        deleted.
        Raises:
            ValueError: For invalid value types.
        """
        expiration_time = self._properties.get('expirationTime')
        if expiration_time is not None:
            # expiration_time will be in milliseconds.
            return google.cloud._helpers._datetime_from_microseconds(
                1000.0 * float(expiration_time))
    @expires.setter
    def expires(self, value):
        if not isinstance(value, datetime.datetime) and value is not None:
            raise ValueError("Pass a datetime, or None")
        # NOTE(review): assumes _millis_from_datetime maps None -> None so
        # that passing None clears the expiration -- verify in _helpers.
        value_ms = google.cloud._helpers._millis_from_datetime(value)
        self._properties['expirationTime'] = _helpers._str_or_none(value_ms)
@property
def friendly_name(self):
    """Union[str, None]: Title of the table (defaults to :data:`None`).

    Raises:
        ValueError: For invalid value types.
    """
    return self._properties.get('friendlyName')

@friendly_name.setter
def friendly_name(self, value):
    # Accept only strings or None; anything else is a caller error.
    valid = isinstance(value, six.string_types) or value is None
    if not valid:
        raise ValueError("Pass a string, or None")
    self._properties['friendlyName'] = value
@property
def location(self):
    """Union[str, None]: Location in which the table is hosted.

    Defaults to :data:`None`. Read-only here: no setter is defined.
    """
    return self._properties.get('location')
@property
def view_query(self):
    """Union[str, None]: SQL query defining the table as a view (defaults
    to :data:`None`).

    By default, the query is treated as Standard SQL. To use Legacy
    SQL, set :attr:`view_use_legacy_sql` to :data:`True`.

    Raises:
        ValueError: For invalid value types.
    """
    view = self._properties.get('view')
    return None if view is None else view.get('query')

@view_query.setter
def view_query(self, value):
    if not isinstance(value, six.string_types):
        raise ValueError("Pass a string")
    view = self._properties.get('view')
    if view is None:
        view = {}
        self._properties['view'] = view
    view['query'] = value
    # The service defaults useLegacySql to True, but this
    # client uses Standard SQL by default.
    if view.get('useLegacySql') is None:
        view['useLegacySql'] = False

@view_query.deleter
def view_query(self):
    """Delete SQL query defining the table as a view."""
    self._properties.pop('view', None)
# Shared read descriptor; the getter is defined at module scope so that
# Table and TableListItem expose identical read behavior.
view_use_legacy_sql = property(_view_use_legacy_sql_getter)

@view_use_legacy_sql.setter
def view_use_legacy_sql(self, value):
    # Only booleans are meaningful for the API's useLegacySql flag.
    if not isinstance(value, bool):
        raise ValueError("Pass a boolean")
    # Lazily create the 'view' sub-resource before writing the flag.
    if self._properties.get('view') is None:
        self._properties['view'] = {}
    self._properties['view']['useLegacySql'] = value
@property
def streaming_buffer(self):
    """google.cloud.bigquery.StreamingBuffer: Information about a table's
    streaming buffer, or :data:`None` when the resource reports none.
    """
    resource = self._properties.get('streamingBuffer')
    if resource is None:
        return None
    return StreamingBuffer(resource)
@property
def external_data_configuration(self):
    """Union[google.cloud.bigquery.ExternalConfig, None]: Configuration for
    an external data source (defaults to :data:`None`).

    Raises:
        ValueError: For invalid value types.
    """
    resource = self._properties.get('externalDataConfiguration')
    if resource is None:
        return None
    return ExternalConfig.from_api_repr(resource)

@external_data_configuration.setter
def external_data_configuration(self, value):
    if not (value is None or isinstance(value, ExternalConfig)):
        raise ValueError("Pass an ExternalConfig or None")
    # Store the API-serialized form, never the ExternalConfig object itself.
    self._properties['externalDataConfiguration'] = (
        None if value is None else value.to_api_repr())
@classmethod
def from_string(cls, full_table_id):
    """Construct a table from fully-qualified table ID.

    Args:
        full_table_id (str):
            A fully-qualified table ID in standard SQL format. Must
            include a project ID, dataset ID, and table ID, each
            separated by ``.``.

    Returns:
        Table: Table parsed from ``full_table_id``.

    Examples:
        >>> Table.from_string('my-project.mydataset.mytable')
        Table(TableRef...(D...('my-project', 'mydataset'), 'mytable'))

    Raises:
        ValueError:
            If ``full_table_id`` is not a fully-qualified table ID in
            standard SQL format.
    """
    # Parsing and validation are delegated to TableReference.from_string.
    return cls(TableReference.from_string(full_table_id))
@classmethod
def from_api_repr(cls, resource):
    """Factory: construct a table given its API representation

    Args:
        resource (Dict[str, object]):
            Table resource representation from the API

    Returns:
        google.cloud.bigquery.table.Table: Table parsed from ``resource``.

    Raises:
        KeyError:
            If the ``resource`` lacks the key ``'tableReference'``, or if
            the ``dict`` stored within the key ``'tableReference'`` lacks
            the keys ``'tableId'``, ``'projectId'``, or ``'datasetId'``.
    """
    # Imported here to avoid a circular import at module load time.
    from google.cloud.bigquery import dataset

    # Only tableReference/tableId are checked explicitly; a missing
    # projectId or datasetId surfaces as KeyError at the lookups below.
    if ('tableReference' not in resource or
            'tableId' not in resource['tableReference']):
        raise KeyError('Resource lacks required identity information:'
                       '["tableReference"]["tableId"]')
    project_id = resource['tableReference']['projectId']
    table_id = resource['tableReference']['tableId']
    dataset_id = resource['tableReference']['datasetId']
    dataset_ref = dataset.DatasetReference(project_id, dataset_id)

    table = cls(dataset_ref.table(table_id))
    # Adopt the raw resource wholesale as the backing properties dict.
    table._properties = resource

    return table
def to_api_repr(self):
    """Constructs the API resource of this table.

    Returns:
        Dict[str, object]: Table represented as an API resource.
    """
    # Deep copy so callers cannot mutate this table's internal state.
    resource = copy.deepcopy(self._properties)
    return resource
def _build_resource(self, filter_fields):
    """Generate a partial resource dict for an ``update`` call.

    Each entry of ``filter_fields`` is either a known library property
    (translated through ``_PROPERTY_TO_API_FIELD``) or a raw API resource
    key already present in ``_properties``.
    """
    partial = {}
    for name in filter_fields:
        api_name = self._PROPERTY_TO_API_FIELD.get(name)
        if api_name is not None:
            partial[api_name] = self._properties.get(api_name)
        elif name in self._properties:
            # allows properties that are not defined in the library
            # and properties that have the same name as API resource key
            partial[name] = self._properties[name]
        else:
            raise ValueError('No Table property %s' % name)
    return partial
def __repr__(self):
    # Identify the table by the repr of its reference.
    return 'Table(%r)' % (self.reference,)
class TableListItem(object):
    """A read-only table resource from a list operation.

    For performance reasons, the BigQuery API only includes some of the table
    properties when listing tables. Notably,
    :attr:`~google.cloud.bigquery.table.Table.schema` and
    :attr:`~google.cloud.bigquery.table.Table.num_rows` are missing.

    For a full list of the properties that the BigQuery API returns, see the
    `REST documentation for tables.list
    <https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list>`_.

    Args:
        resource (Dict[str, object]):
            A table-like resource object from a table list response. A
            ``tableReference`` property is required.

    Raises:
        ValueError:
            If ``tableReference`` or one of its required members is missing
            from ``resource``.
    """

    def __init__(self, resource):
        # Fail fast on malformed list responses: every item must carry a
        # complete table reference (project, dataset and table IDs).
        if 'tableReference' not in resource:
            raise ValueError('resource must contain a tableReference value')
        if 'projectId' not in resource['tableReference']:
            raise ValueError(
                "resource['tableReference'] must contain a projectId value")
        if 'datasetId' not in resource['tableReference']:
            raise ValueError(
                "resource['tableReference'] must contain a datasetId value")
        if 'tableId' not in resource['tableReference']:
            raise ValueError(
                "resource['tableReference'] must contain a tableId value")
        self._properties = resource

    @property
    def project(self):
        """str: Project bound to the table."""
        return self._properties['tableReference']['projectId']

    @property
    def dataset_id(self):
        """str: ID of dataset containing the table."""
        return self._properties['tableReference']['datasetId']

    @property
    def table_id(self):
        """str: ID of the table."""
        return self._properties['tableReference']['tableId']

    # Shared descriptor defined at module scope; builds the table reference
    # from _properties.
    reference = property(_reference_getter)

    @property
    def labels(self):
        """Dict[str, str]: Labels for the table.

        This method always returns a dict. To change a table's labels,
        modify the dict, then call ``Client.update_table``. To delete a
        label, set its value to :data:`None` before updating.
        """
        return self._properties.setdefault('labels', {})

    @property
    def full_table_id(self):
        """Union[str, None]: ID for the table (:data:`None` until set from the
        server).

        In the format ``project_id:dataset_id.table_id``.
        """
        return self._properties.get('id')

    @property
    def table_type(self):
        """Union[str, None]: The type of the table (:data:`None` until set from
        the server).

        Possible values are ``'TABLE'``, ``'VIEW'``, or ``'EXTERNAL'``.
        """
        return self._properties.get('type')

    @property
    def time_partitioning(self):
        """google.cloud.bigquery.table.TimePartitioning: Configures time-based
        partitioning for a table.
        """
        prop = self._properties.get('timePartitioning')
        if prop is not None:
            return TimePartitioning.from_api_repr(prop)

    @property
    def partitioning_type(self):
        """Union[str, None]: Time partitioning of the table if it is
        partitioned (Defaults to :data:`None`).
        """
        # Deprecated alias for time_partitioning.type_.
        warnings.warn(
            "This method will be deprecated in future versions. Please use "
            "TableListItem.time_partitioning.type_ instead.",
            PendingDeprecationWarning)
        if self.time_partitioning is not None:
            return self.time_partitioning.type_

    @property
    def partition_expiration(self):
        """Union[int, None]: Expiration time in milliseconds for a partition.

        If this property is set and :attr:`type_` is not set, :attr:`type_`
        will default to :attr:`TimePartitioningType.DAY`.
        """
        # Deprecated alias for time_partitioning.expiration_ms.
        warnings.warn(
            "This method will be deprecated in future versions. Please use "
            "TableListItem.time_partitioning.expiration_ms instead.",
            PendingDeprecationWarning)
        if self.time_partitioning is not None:
            return self.time_partitioning.expiration_ms

    @property
    def friendly_name(self):
        """Union[str, None]: Title of the table (defaults to :data:`None`)."""
        return self._properties.get('friendlyName')

    # Same shared getter as Table.view_use_legacy_sql; no setter is defined
    # here, so the attribute is read-only on list items.
    view_use_legacy_sql = property(_view_use_legacy_sql_getter)
def _row_from_mapping(mapping, schema):
    """Convert a mapping to a row tuple using the schema.

    Args:
        mapping (Dict[str, object])
            Mapping of row data: must contain keys for all required fields in
            the schema. Keys which do not correspond to a field in the schema
            are ignored.
        schema (List[google.cloud.bigquery.schema.SchemaField]):
            The schema of the table destination for the rows

    Returns:
        Tuple[object]:
            Tuple whose elements are ordered according to the schema.

    Raises:
        ValueError: If schema is empty.
    """
    if not schema:
        raise ValueError(_TABLE_HAS_NO_SCHEMA)

    row = []
    for field in schema:
        mode = field.mode
        if mode == 'REQUIRED':
            # A KeyError here means the caller omitted a required field.
            row.append(mapping[field.name])
        elif mode == 'REPEATED':
            row.append(mapping.get(field.name, ()))
        elif mode == 'NULLABLE':
            row.append(mapping.get(field.name))
        else:
            raise ValueError(
                "Unknown field mode: {}".format(mode))
    return tuple(row)
class StreamingBuffer(object):
    """Information about a table's streaming buffer.

    See https://cloud.google.com/bigquery/streaming-data-into-bigquery.

    Args:
        resource (Dict[str, object]):
            streaming buffer representation returned from the API
    """

    def __init__(self, resource):
        self.estimated_bytes = int(resource['estimatedBytes'])
        self.estimated_rows = int(resource['estimatedRows'])
        # The API reports the oldest entry in milliseconds since the epoch;
        # the helper expects microseconds, hence the factor of 1000.
        oldest_ms = int(resource['oldestEntryTime'])
        self.oldest_entry_time = (
            google.cloud._helpers._datetime_from_microseconds(
                1000.0 * oldest_ms))
class Row(object):
    """A BigQuery row.

    Values can be accessed by position (index), by key like a dict,
    or as properties.

    Args:
        values (Sequence[object]): The row values
        field_to_index (Dict[str, int]):
            A mapping from schema field names to indexes
    """

    # Choose unusual field names to try to avoid conflict with schema fields.
    __slots__ = ('_xxx_values', '_xxx_field_to_index')

    def __init__(self, values, field_to_index):
        self._xxx_values = values
        self._xxx_field_to_index = field_to_index

    def values(self):
        """Return the values included in this row.

        Returns:
            Sequence[object]: A sequence of length ``len(row)``.
        """
        # Deep copy so callers cannot mutate the row's internal state.
        return copy.deepcopy(self._xxx_values)

    def keys(self):
        """Return the keys for using a row as a dict.

        Returns:
            Iterable[str]: The keys corresponding to the columns of a row

        Examples:

            >>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).keys())
            ['x', 'y']
        """
        return six.iterkeys(self._xxx_field_to_index)

    def items(self):
        """Return items as ``(key, value)`` pairs.

        Returns:
            Iterable[Tuple[str, object]]:
                The ``(key, value)`` pairs representing this row.

        Examples:

            >>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).items())
            [('x', 'a'), ('y', 'b')]
        """
        for field_name, position in six.iteritems(self._xxx_field_to_index):
            yield (field_name, copy.deepcopy(self._xxx_values[position]))

    def get(self, key, default=None):
        """Return a value for key, with a default value if it does not exist.

        Args:
            key (str): The key of the column to access
            default (object):
                The default value to use if the key does not exist. (Defaults
                to :data:`None`.)

        Returns:
            object:
                The value associated with the provided key, or a default value.

        Examples:
            When the key exists, the value associated with it is returned.

            >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x')
            'a'

            The default value is :data:`None` when the key does not exist.

            >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z')
            None

            The default value can be overrided with the ``default`` parameter.

            >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '')
            ''

            >>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default = '')
            ''
        """
        position = self._xxx_field_to_index.get(key)
        if position is None:
            return default
        return self._xxx_values[position]

    def __getattr__(self, name):
        position = self._xxx_field_to_index.get(name)
        if position is None:
            raise AttributeError('no row field {!r}'.format(name))
        return self._xxx_values[position]

    def __len__(self):
        return len(self._xxx_values)

    def __getitem__(self, key):
        # String keys are translated to positions through the schema
        # mapping; integer keys index the values directly.
        if isinstance(key, six.string_types):
            position = self._xxx_field_to_index.get(key)
            if position is None:
                raise KeyError('no row field {!r}'.format(key))
            key = position
        return self._xxx_values[key]

    def __eq__(self, other):
        if not isinstance(other, Row):
            return NotImplemented
        same_values = self._xxx_values == other._xxx_values
        same_fields = self._xxx_field_to_index == other._xxx_field_to_index
        return same_values and same_fields

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        # sort field dict by value, for determinism
        ordered = sorted(self._xxx_field_to_index.items(),
                         key=operator.itemgetter(1))
        f2i = '{' + ', '.join('%r: %d' % pair for pair in ordered) + '}'
        return 'Row({}, {})'.format(self._xxx_values, f2i)
class RowIterator(HTTPIterator):
    """A class for iterating through HTTP/JSON API row list responses.

    Args:
        client (google.cloud.bigquery.Client): The API client.
        api_request (Callable[google.cloud._http.JSONConnection.api_request]):
            The function to use to make API requests.
        path (str): The method path to query for the list of items.
        schema (Sequence[google.cloud.bigquery.schema.SchemaField]):
            The table's schema, used to decode each row's cells.
        page_token (str): A token identifying a page in a result set to start
            fetching results from.
        max_results (int, optional): The maximum number of results to fetch.
        page_size (int, optional): The number of items to return per page.
        extra_params (Dict[str, object]):
            Extra query string parameters for the API call.
    """

    def __init__(self, client, api_request, path, schema, page_token=None,
                 max_results=None, page_size=None, extra_params=None):
        super(RowIterator, self).__init__(
            client, api_request, path, item_to_value=_item_to_row,
            items_key='rows', page_token=page_token, max_results=max_results,
            extra_params=extra_params, page_start=_rows_page_start,
            next_token='pageToken')
        self._schema = schema
        # Precomputed name -> position mapping shared by every Row produced.
        self._field_to_index = _helpers._field_to_index_mapping(schema)
        # Populated lazily by _rows_page_start from the 'totalRows' response.
        self._total_rows = None
        self._page_size = page_size

    def _get_next_page_response(self):
        """Requests the next page from the path provided.

        Returns:
            Dict[str, object]:
                The parsed JSON response of the next page's contents.
        """
        params = self._get_query_params()
        # Honor the caller-requested page size, if any.
        if self._page_size is not None:
            params['maxResults'] = self._page_size
        return self.api_request(
            method=self._HTTP_METHOD,
            path=self.path,
            query_params=params)

    @property
    def schema(self):
        """List[google.cloud.bigquery.schema.SchemaField]: Table's schema."""
        # Return a copy so callers cannot mutate the iterator's schema.
        return list(self._schema)

    @property
    def total_rows(self):
        """int: The total number of rows in the table."""
        return self._total_rows

    def to_dataframe(self):
        """Create a pandas DataFrame from the query results.

        Returns:
            pandas.DataFrame:
                A :class:`~pandas.DataFrame` populated with row data and column
                headers from the query results. The column headers are derived
                from the destination table's schema.

        Raises:
            ValueError: If the :mod:`pandas` library cannot be imported.
        """
        if pandas is None:
            raise ValueError(_NO_PANDAS_ERROR)
        column_headers = [field.name for field in self.schema]
        # Use generator, rather than pulling the whole rowset into memory.
        rows = (row.values() for row in iter(self))
        return pandas.DataFrame(rows, columns=column_headers)
class _EmptyRowIterator(object):
    """An empty row iterator.

    This class prevents API requests when there are no rows to fetch or rows
    are impossible to fetch, such as with query results for DDL CREATE VIEW
    statements.
    """

    # Mimic RowIterator's public attributes with empty values.
    schema = ()
    pages = ()
    total_rows = 0

    def to_dataframe(self):
        # Match RowIterator.to_dataframe: pandas is required, no rows result.
        if pandas is None:
            raise ValueError(_NO_PANDAS_ERROR)
        return pandas.DataFrame()

    def __iter__(self):
        return iter(())
class TimePartitioningType(object):
    """Specifies the type of time partitioning to perform."""

    # Generates one partition per day.
    DAY = 'DAY'
class TimePartitioning(object):
    """Configures time-based partitioning for a table.

    Args:
        type_ (google.cloud.bigquery.table.TimePartitioningType, optional):
            Specifies the type of time partitioning to perform. Defaults to
            :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`,
            which is the only currently supported type.
        field (str, optional):
            If set, the table is partitioned by this field. If not set, the
            table is partitioned by pseudo column ``_PARTITIONTIME``. The field
            must be a top-level ``TIMESTAMP`` or ``DATE`` field. Its mode must
            be ``NULLABLE`` or ``REQUIRED``.
        expiration_ms(int, optional):
            Number of milliseconds for which to keep the storage for a
            partition.
        require_partition_filter (bool, optional):
            If set to true, queries over the partitioned table require a
            partition filter that can be used for partition elimination to be
            specified.
    """

    def __init__(self, type_=None, field=None, expiration_ms=None,
                 require_partition_filter=None):
        self._properties = {}
        # 'type' is always written; the remaining keys only when provided.
        self.type_ = TimePartitioningType.DAY if type_ is None else type_
        if field is not None:
            self.field = field
        if expiration_ms is not None:
            self.expiration_ms = expiration_ms
        if require_partition_filter is not None:
            self.require_partition_filter = require_partition_filter

    @property
    def type_(self):
        """google.cloud.bigquery.table.TimePartitioningType: The type of time
        partitioning to use.
        """
        return self._properties['type']

    @type_.setter
    def type_(self, value):
        self._properties['type'] = value

    @property
    def field(self):
        """str: Field in the table to use for partitioning"""
        return self._properties.get('field')

    @field.setter
    def field(self, value):
        self._properties['field'] = value

    @property
    def expiration_ms(self):
        """int: Number of milliseconds to keep the storage for a partition."""
        return _helpers._int_or_none(self._properties.get('expirationMs'))

    @expiration_ms.setter
    def expiration_ms(self, value):
        # The API wire format carries milliseconds as a string.
        self._properties['expirationMs'] = str(value)

    @property
    def require_partition_filter(self):
        """bool: Specifies whether partition filters are required for queries
        """
        return self._properties.get('requirePartitionFilter')

    @require_partition_filter.setter
    def require_partition_filter(self, value):
        self._properties['requirePartitionFilter'] = value

    @classmethod
    def from_api_repr(cls, api_repr):
        """Return a :class:`TimePartitioning` object deserialized from a dict.

        This method creates a new ``TimePartitioning`` instance that points to
        the ``api_repr`` parameter as its internal properties dict. This means
        that when a ``TimePartitioning`` instance is stored as a property of
        another object, any changes made at the higher level will also appear
        here::

            >>> time_partitioning = TimePartitioning()
            >>> table.time_partitioning = time_partitioning
            >>> table.time_partitioning.field = 'timecolumn'
            >>> time_partitioning.field
            'timecolumn'

        Args:
            api_repr (Mapping[str, str]):
                The serialized representation of the TimePartitioning, such as
                what is output by :meth:`to_api_repr`.

        Returns:
            google.cloud.bigquery.table.TimePartitioning:
                The ``TimePartitioning`` object.
        """
        # Adopt (do not copy) the caller's mapping as the properties dict.
        instance = cls(api_repr['type'])
        instance._properties = api_repr
        return instance

    def to_api_repr(self):
        """Return a dictionary representing this object.

        This method returns the properties dict of the ``TimePartitioning``
        instance rather than making a copy. This means that when a
        ``TimePartitioning`` instance is stored as a property of another
        object, any changes made at the higher level will also appear here.

        Returns:
            dict:
                A dictionary representing the TimePartitioning object in
                serialized form.
        """
        return self._properties
def _item_to_row(iterator, resource):
    """Convert a JSON row to the native object.

    .. note::
        This assumes that the ``schema`` attribute has been
        added to the iterator after being created, which
        should be done by the caller.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type resource: dict
    :param resource: An item to be converted to a row.

    :rtype: :class:`~google.cloud.bigquery.table.Row`
    :returns: The next row in the page.
    """
    # Decode the JSON cells against the schema and pair them with the
    # iterator's shared name -> index mapping.
    return Row(_helpers._row_tuple_from_json(resource, iterator.schema),
               iterator._field_to_index)
# pylint: disable=unused-argument
def _rows_page_start(iterator, page, response):
    """Grab total rows when :class:`~google.cloud.iterator.Page` starts.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type page: :class:`~google.api_core.page_iterator.Page`
    :param page: The page that was just created.

    :type response: dict
    :param response: The JSON API response for a page of rows in a table.
    """
    # The API encodes totalRows as a decimal string; normalize to int.
    raw_total = response.get('totalRows')
    iterator._total_rows = None if raw_total is None else int(raw_total)
# pylint: enable=unused-argument
| apache-2.0 |
simon-pepin/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

from sklearn import mixture

n_samples = 300

# generate random sample, two components
np.random.seed(0)

# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])

# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)

# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])

# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)

# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
# NOTE(review): in this sklearn version GMM.score_samples appears to return
# a tuple whose first element is the per-sample log-likelihoods ([0] below);
# negating gives the negative log-likelihood plotted in the title — confirm
# against the installed sklearn version.
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)

CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
                 levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)

plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
zooniverse/aggregation | active_weather/text_detection.py | 1 | 4656 | #!/usr/bin/env python
import glob
import active_weather
import matplotlib.pyplot as plt
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Expected character-cell extents in pixels; the data-driven medians are
# commented out in favor of fixed values.
height = 40#int(np.median(heights))
width = 30#int(np.median(widths))

# plt.show()
# assert False

# for wind in sliding_window(img,5,(50,50)):
#     plt.imshow(wind[2])
#     plt.show()

# Single-layer softmax regression over flattened 28x28 MNIST images:
# y = softmax(xW + b), 10 output classes.
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)

# Cross-entropy loss against one-hot labels, trained with plain SGD.
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

init = tf.initialize_all_variables()

sess = tf.Session()
sess.run(init)

# Train on 1000 mini-batches of 100 images each.
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
def scale_img(img):
    """Convert a glyph image to MNIST input format.

    Resizes to 28x28, inverts and rescales intensities to [0, 1]
    (MNIST digits are bright-on-dark), then flattens into a (1, 784)
    float array suitable for the classifier's feed dict.
    """
    resized = cv2.resize(img, (28, 28), interpolation=cv2.INTER_CUBIC)
    normalized = (255 - resized) / 255.
    flat = np.reshape(normalized, 784)
    out = np.ndarray((1, 784))
    out[0, :] = flat
    return out
# print(mnist.test.images[0].shape)
# print(type(probabilities.eval(feed_dict={x: mnist.test.images[0]}, session=sess)))

# Collect every MNIST test image whose one-hot label marks the digit 5.
examples = []
for i in range(3000):
    l = mnist.test.labels[i]
    if l[5] == 1:
        # t = np.reshape(mnist.test.images[i],(28,28))
        # plt.imshow(t)
        # plt.show()
        #
        # break
        examples.append(mnist.test.images[i])

from sklearn.decomposition import PCA

print(len(examples))

# Project the '5' examples onto their first 50 principal components and
# visualize the median digit reconstructed from that subspace.
pca = PCA(n_components=50)
X_r = pca.fit(np.asarray(examples)).transform(np.asarray(examples))
print(sum(pca.explained_variance_ratio_))
print(X_r.shape)

avg_2 = np.median(X_r,axis=0)
inverse = pca.inverse_transform(avg_2)
inverse = np.reshape(inverse,(28,28))
plt.imshow(inverse)
plt.show()

import math

probabilities=y

text = []
for fname in glob.glob("/home/ggdhines/Databases/old_weather/aligned_images/Bear/1940/*.JPG")[:40]:
    # NOTE(review): fname is immediately overwritten below, so every
    # iteration processes the same hard-coded page; looks like a debugging
    # leftover — confirm before relying on this loop.
    fname = "/home/ggdhines/Databases/old_weather/aligned_images/Bear/1940/Bear-AG-29-1940-0009.JPG"
    img = active_weather.__extract_region__(fname)
    id_ = fname.split("/")[-1][:-4]
    print(id_)

    # set a baseline for performance with otsu's binarization
    mask = active_weather.__create_mask__(img)
    horizontal_grid, vertical_grid = active_weather.__cell_boundaries__(img)
    pca_image, threshold, inverted = active_weather.__pca__(img, active_weather.__otsu_bin__)
    masked_image = active_weather.__mask_lines__(pca_image, mask)
    # plt.imshow(masked_image)
    # plt.show()

    im2, contours, hierarchy = cv2.findContours(masked_image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    for cnt,h in zip(contours,hierarchy[0]):
        if h[0] == -1:
            continue

        # Axis-aligned bounding box of the contour.
        leftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])[0]
        rightmost = tuple(cnt[cnt[:, :, 0].argmax()][0])[0]
        topmost = tuple(cnt[cnt[:, :, 1].argmax()][0])[1]
        bottommost = tuple(cnt[cnt[:, :, 1].argmin()][0])[1]

        perimeter = cv2.arcLength(cnt, True)

        # template2 = np.zeros(img.shape,np.uint8)
        # template2.fill(255)
        # cv2.drawContours(template2, [cnt], 0, 0, -1)
        # plt.imshow(template2)
        # plt.show()

        # Discard blobs larger than a typical character cell.
        if (rightmost - leftmost > width) or (topmost - bottommost > height):
            continue

        # Keep only blobs big enough to plausibly be a character.
        if (rightmost - leftmost > 10) and (topmost - bottommost > 10) and (cv2.arcLength(cnt,True) > 10):
            # template = np.zeros((topmost - bottommost,rightmost - leftmost), np.uint8)
            # template = np.zeros((height,width), np.uint8)
            # template.fill(255)
            # print(template.shape)
            s = cnt.shape
            cnt = np.reshape(cnt,(s[0],s[2]))
            # cnt[:,0] -= leftmost
            # cnt[:,1] -= bottommost
            # cv2.drawContours(template, [cnt], 0, 0,-1)
            # print(leftmost,rightmost)

            # Crop the candidate glyph from the line-masked image.
            template = masked_image[bottommost:topmost,leftmost:rightmost]
            # plt.imshow(masked_image[bottommost:topmost,leftmost:rightmost],cmap="gray")
            # plt.show()
            # print(template.shape)
            res = scale_img(template)

            # Distance of this glyph from the median '5' in PCA space.
            T = pca.transform(np.asarray(res))
            print(np.power(np.sum(np.power(T-avg_2,2)),0.5))

            # Classify with the trained softmax model and show the glyph.
            p = probabilities.eval(feed_dict={x: res}, session=sess)[0]
            res = np.reshape(res,(28,28))
            plt.imshow(res)
            print(np.max(p),np.argmax(p))
            plt.show()
break | apache-2.0 |
btabibian/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
# Metric name -> callable(y_true, y_pred). Averaging strategies are bound
# with partial so every entry shares the same two-argument signature.
METRICS = {
    'f1': partial(f1_score, average='micro'),
    'f1-by-sample': partial(f1_score, average='samples'),
    'accuracy': accuracy_score,
    'hamming': hamming_loss,
    'jaccard': jaccard_similarity_score,
}

# Format name -> converter from a dense indicator matrix to that
# multilabel representation.
FORMATS = {
    'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
    'dense': lambda y: y,
    'csr': lambda y: sp.csr_matrix(y),
    'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs

    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.

    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.

    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.

    classes : array-like of ints (1d or 0d)
        The number of classes in the input.

    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.

    n_times : int
        Time calling the metric n_times times.

    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # Normalize scalars to 1-d arrays so a single loop handles both cases.
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)

    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)

    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # Distinct random states give uncorrelated y_true / y_pred.
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)

                # out[k, j] has shape (samples, classes, density); `i`
                # enumerates product(samples, classes, density) in the same
                # C order, so flat indexing lines up exactly.
                out[k, j].flat[i] = t

    return out
def _tabulate(results, metrics, formats):
    """Print one timing row per metric, one column per format.

    Uses the last ([-1]) value of the samples/classes/density axes.
    """
    col_w = max(max(len(name) for name in formats) + 1, 8)
    first_w = max(len(name) for name in metrics)

    header_fmt = '{:<{fw}s}' + '{:>{cw}s}' * len(formats)
    print(header_fmt.format('Metric', *formats, cw=col_w, fw=first_w))

    row_fmt = '{:<{fw}s}' + '{:>{cw}.3f}' * len(formats)
    for metric, timings in zip(metrics, results[:, :, -1, -1, -1]):
        print(row_fmt.format(metric, *timings, cw=col_w, fw=first_w))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """
    Plot the results by metric, format and some other variable given by
    x_label

    Parameters
    ----------
    results : ndarray
        Timings shaped (metrics, formats, ...); each (metric, format)
        series is flattened and plotted against ``x_ticks``.
    metrics, formats : sequences of str
        Labels for the first two axes of ``results``.
    title : str
        Figure title.
    x_ticks : array-like
        X values (the swept parameter's steps).
    x_label : str
        Name of the swept parameter.
    format_markers, metric_colors : tuples
        Marker per format and color per metric (colors cycle if there are
        more metrics than colors).
    """
    fig = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    ax = fig.add_subplot(111)
    for i, metric in enumerate(metrics):
        # `fmt` rather than `format` to avoid shadowing the builtin
        for j, fmt in enumerate(formats):
            ax.plot(x_ticks, results[i, j].flat,
                    label='{}, {}'.format(metric, fmt),
                    marker=format_markers[j],
                    color=metric_colors[i % len(metric_colors)])
    ax.set_xlabel(x_label)
    ax.set_ylabel('Time (s)')
    ax.legend()
    plt.show()
if __name__ == "__main__":
    # Command-line interface: pick metrics/formats and problem size, and
    # optionally sweep one parameter (--plot) to produce a timing plot.
    ap = argparse.ArgumentParser()
    ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
                    help='Specifies metrics to benchmark, defaults to all. '
                         'Choices are: {}'.format(sorted(METRICS)))
    ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
                    help='Specifies multilabel formats to benchmark '
                         '(defaults to all).')
    ap.add_argument('--samples', type=int, default=1000,
                    help='The number of samples to generate')
    ap.add_argument('--classes', type=int, default=10,
                    help='The number of classes')
    ap.add_argument('--density', type=float, default=.2,
                    help='The average density of labels per sample')
    ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
                    default=None,
                    help='Plot time with respect to this parameter varying '
                         'up to the specified value')
    ap.add_argument('--n-steps', default=10, type=int,
                    help='Plot this many points for each metric')
    ap.add_argument('--n-times',
                    default=5, type=int,
                    help="Time performance over n_times trials")
    args = ap.parse_args()
    if args.plot is not None:
        # Replace the swept parameter's scalar value with an array of steps;
        # classes/samples must be integral and at least 2.
        max_val = getattr(args, args.plot)
        if args.plot in ('classes', 'samples'):
            min_val = 2
        else:
            min_val = 0
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ('classes', 'samples'):
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)
    # NOTE(review): `metrics` has a non-None default above (nargs='*'),
    # so this branch looks unreachable -- kept as a safety net.
    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)
    results = benchmark([METRICS[k] for k in args.metrics],
                        [FORMATS[k] for k in args.formats],
                        args.samples, args.classes, args.density,
                        args.n_times)
    _tabulate(results, args.metrics, args.formats)
    if args.plot is not None:
        print('Displaying plot', file=sys.stderr)
        title = ('Multilabel metrics with %s' %
                 ', '.join('{0}={1}'.format(field, getattr(args, field))
                           for field in ['samples', 'classes', 'density']
                           if args.plot != field))
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
tcchenbtx/project-zeta-J | code/correlation_analysis_scripts_sub5.py | 3 | 23223 | import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib import colors
import copy
# maximally responded area, percentile setting:
# voxels at or above this percentile are treated as "maximally responded"
# and zeroed out in the non-max analyses below
percent = 80
# object_list
# stimulus categories; each has one odd-run and one even-run result file
object_list = ["bottle", "cat", "chair", "face", "house", "scissors", "scrambledpix", "shoe"]
# important path:
# paths are resolved relative to this script's parent directory
base_path = os.path.abspath(os.path.dirname(__file__))
base_path = os.path.join(base_path, "..")
figure_path = os.path.join(base_path, "code", "images", "")
file_path = os.path.join(base_path, "code", "txt", "")
# color display
# custom colormap loaded from actc.txt (must exist in file_path)
nice_cmap_values = np.loadtxt(file_path + 'actc.txt')
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
# generalized analysis, choose which subject to focus on:
subid = "sub005"
# generate list for odd and even run values:
# e.g. "sub005_odd_face" -- basename of a per-condition result text file
odd_runs = ["%s_odd_%s" % (subid, i) for i in object_list]
even_runs = ["%s_even_%s" % (subid, i) for i in object_list]
# separator:
separator = "-" * 80
############################ Start 2D analysis #############################
print ("Advanced correlation analysis:")
print (separator)
print ("")
# load even and odd run results
# keys are run names, values are the flat per-voxel arrays from disk
all_runs = {}
for i in odd_runs:
    all_runs[i] = np.loadtxt(file_path + i + ".txt")
for i in even_runs:
    all_runs[i] = np.loadtxt(file_path + i + ".txt")
# reshape to 3d images
# single z-slice: (rows, 25, 1)
all_3d = {}
for key, txt in all_runs.items():
    all_3d[key] = np.reshape(txt, (-1, 25, 1))
# make a copy of the images for making figures:
# (deepcopy so the NaN masking below does not touch the analysis data)
all_3d_fig = copy.deepcopy(all_3d)
# save each 3d image as figure
for key, fig in all_3d_fig.items():
    # zeros are background; NaN renders as blank in imshow
    fig[fig == 0] = np.nan
    plt.imshow(fig[:, :, 0], interpolation="nearest", cmap=nice_cmap)
    plt.title("%s" % key)
    plt.savefig(figure_path + "%s.png" % key)
    plt.clf()
    plt.close()
# save all 3d images as one compiled figure
# layout: 2 rows (odd / even) x 8 columns (one per object)
fig = plt.figure(figsize=[8.0, 5])
i = 1
# plot odd run results
for item in object_list:
    plt.subplot(2, 8, i, xticks=[], yticks=[])
    plt.imshow(all_3d_fig["%s_odd_%s" % (subid, item)][:, :, 0], interpolation="nearest", cmap=nice_cmap)
    plt.title("%s" % item, fontsize=8, weight='bold')
    i += 1
# plot even run results
for item in object_list:
    plt.subplot(2, 8, i, xticks=[], yticks=[])
    plt.imshow(all_3d_fig["%s_even_%s" % (subid, item)][:, :, 0], interpolation="nearest", cmap=nice_cmap)
    i += 1
plt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.1, bottom=0.05, top=0.835)
# label the figure:
fig.text(0.03, 0.625, 'Odd runs', ha='left', weight='bold')
fig.text(0.03, 0.225, 'Even runs', ha='left', weight='bold')
fig.text(0.05, 0.93, 'Average brain images for odd runs / even runs of %s' % subid, fontsize=16, weight='bold')
# save figure
plt.savefig(figure_path + "odd_even_compile_%s.png" % subid)
# close pyplot window
plt.close()
# report
print ("Average odd run and even run results are saved as images")
print (separator)
# Run correlation:
# Pearson correlation of every odd run against every even run (8x8 matrix)
all_results = []
print ("correlation analysis:")
for i in odd_runs:
    result = []
    for j in even_runs:
        corr = np.corrcoef(all_runs[i], all_runs[j])
        result.append(corr[0, 1])
        print ("%s vs %s: %.4f" % (i, j, corr[0, 1]))
    all_results.append(result)
table_result = np.array(all_results)
np.savetxt(file_path + "correlation_value_%s.txt" % subid, np.ravel(table_result))
# make table to display the correlation:
fig = plt.figure(figsize=(8, 4))
plt.subplot(111, frameon=False, xticks=[], yticks=[])
table = plt.table(cellText=table_result.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')
plt.subplots_adjust(left=0.3, bottom=0, top=0.95)
fig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)
fig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)
fig.text(0.2, 0.85, "Correlation between odd runs and even runs for %s" % subid, weight='bold')
table.scale(1.2, 1.2)
plt.savefig(figure_path + "correlation_table_%s.png" % subid)
###############################################################################
# remove the maximally responded area and perform the correlation once again:
# create a copy of data to work on this analysis:
new_all_runs = copy.deepcopy(all_runs)
# Zero out voxels at or above the `percent` percentile (defined at the top
# of the script). The threshold was previously hard-coded as q=80 here,
# which could silently diverge from the `percent` setting.
for key, result in new_all_runs.items():
    thresh = np.percentile(result, q=percent)
    nparray = np.array(result)
    nparray[nparray >= thresh] = 0
    new_all_runs[key] = nparray
# reshape the new_all_runs:
# thresholded data back to (rows, 25, 1) slices
new_all_3d = {}
for key, txt in new_all_runs.items():
    new_all_3d[key] = np.reshape(txt, (-1, 25, 1))
# make a copy of the images for making figures:
new_all_3d_fig = copy.deepcopy(new_all_3d)
# clear the background
for key, fig in new_all_3d_fig.items():
    fig[fig == 0] = np.nan
# save all 3d images as one compiled figure
fig = plt.figure(figsize=[8.0, 5])
i = 1
for item in object_list:
    plt.subplot(2, 8, i, xticks=[], yticks=[])
    plt.imshow(new_all_3d_fig["%s_odd_%s" % (subid, item)][:, :, 0], interpolation="nearest", cmap=nice_cmap)
    plt.title("%s" % item, fontsize=8, weight='bold')
    i += 1
for item in object_list:
    plt.subplot(2, 8, i, xticks=[], yticks=[])
    plt.imshow(new_all_3d_fig["%s_even_%s" % (subid, item)][:, :, 0], interpolation="nearest", cmap=nice_cmap)
    i += 1
plt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.1, bottom=0.05, top=0.835)
# label the figure:
fig.text(0.03, 0.625, 'Odd runs', ha='left', weight='bold')
fig.text(0.03, 0.225, 'Even runs', ha='left', weight='bold')
fig.text(0.08, 0.93, "Average brain images after removing 80%% max for %s" % subid, fontsize=14, weight='bold')
plt.savefig(figure_path + "non_max_odd_even_compile_%s.png" % subid)
plt.close()
# Run correlation:
# same odd-vs-even 8x8 correlation, now on the thresholded data
non_max_all_results = []
print ("correlation analysis of non-maximal results:")
for i in odd_runs:
    result = []
    for j in even_runs:
        corr = np.corrcoef(new_all_runs[i], new_all_runs[j])
        result.append(corr[0, 1])
        print ("%s vs %s: %.4f" % (i, j, corr[0, 1]))
    non_max_all_results.append(result)
non_max_table_result = np.array(non_max_all_results)
np.savetxt(file_path + "non_max_correlation_value_%s.txt" % subid, np.ravel(non_max_table_result))
# make table to display the correlation:
fig = plt.figure(figsize=(8, 4))
plt.subplot(111, frameon=False, xticks=[], yticks=[])
table = plt.table(cellText=non_max_table_result.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')
plt.subplots_adjust(left=0.3, bottom=0, top=0.95)
fig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)
fig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)
fig.text(1.0, 0.85, "Correlation of non_maximal responded brain of %s" % subid, weight='bold')
table.scale(1.2, 1.2)
plt.savefig(figure_path + "non_max_correlation_table_%s.png" % subid)
plt.close()
# generate bar plot
# one panel per object, comparing full vs non-max correlations
ind = np.arange(8)
width = 0.35
fig = plt.figure(figsize=(10, 24))
# subplot indices are 1-based; the original loop used range(8), so the
# first iteration requested subplot index 0 (invalid) with row index -1.
for plot_num in range(1, 9):
    i = plot_num - 1
    ax = plt.subplot(8, 1, plot_num, frameon=False)
    bar_plot1 = ax.bar(ind, table_result[i, :], width, color='royalblue')
    bar_plot2 = ax.bar(ind+width, non_max_table_result[i, :], width, color='deepskyblue')
    # add some label:
    ax.set_ylabel("Correlation")
    ax.set_title("%s" % object_list[i])
    ax.set_xticks(ind+width)
    ax.set_xticklabels(object_list)
    ax.set_yticks([-0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.axhline(0, color='black', linewidth=2)
    ax.axvline(0, color='black', linewidth=2)
plt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.5, bottom=0.05, top=0.95)
plt.savefig(figure_path + "%s_2d_total_correlation_bar_both.png" % subid)
# generate individual bar plot
# one figure per object, saved separately
ind = np.arange(8)
width = 0.35
fig = plt.figure(figsize=(12, 5))
for i in range(8):
    ax = plt.subplot(111, frameon=False)
    bar_plot1 = ax.bar(ind, table_result[i, :], width, color='royalblue')
    bar_plot2 = ax.bar(ind+width, non_max_table_result[i, :], width, color='deepskyblue')
    # add some label:
    ax.set_ylabel("Correlation")
    ax.set_title("%s" % object_list[i])
    ax.set_xticks(ind+width)
    ax.set_xticklabels(object_list)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_yticks([-0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
    ax.axhline(0, color='black', linewidth=2)
    ax.axvline(0, color='black', linewidth=2)
    ax.legend((bar_plot1[0], bar_plot2[0]), ('All responding voxels', 'Excluding max responded voxels'), bbox_to_anchor=(0.7, 1.06), loc=2, borderaxespad=0., fontsize=12)
    plt.savefig(figure_path + "%s_2d_%s_total_correlation_bar_both.png" % (subid, object_list[i]))
    plt.clf()
plt.close()
##################################################################
# subtract the mean and try the correlation:
# create a copy of data to work on this analysis:
subtrmean_all_runs = copy.deepcopy(all_runs)
# compute the grand mean over all 16 runs (8 objects x odd/even) and
# subtract it from every run before correlating
total_mean = np.zeros_like(subtrmean_all_runs["%s_odd_face" % subid])
total_num = 0
for key, result in subtrmean_all_runs.items():
    nparray = np.array(result)
    total_mean += nparray
    total_num += 1
total_mean = total_mean/total_num
subtract_mean_result = {}
for key, result in subtrmean_all_runs.items():
    nparray = np.array(result)
    subtract_mean_result[key] = nparray - total_mean
# reshape the subtract_mean_result:
subtract_mean_all_3d = {}
for key, txt in subtract_mean_result.items():
    subtract_mean_all_3d[key] = np.reshape(txt, (-1, 25, 1))
# make a copy of the images for making figures:
subtract_mean_all_3d_fig = copy.deepcopy(subtract_mean_all_3d)
# clear the background
for key, fig in subtract_mean_all_3d_fig.items():
    fig[fig == 0] = np.nan
# save all 3d images as one compiled figure
fig = plt.figure(figsize=[8.0, 5])
i = 1
for item in object_list:
    plt.subplot(2, 8, i, xticks=[], yticks=[])
    plt.imshow(subtract_mean_all_3d_fig["%s_odd_%s" % (subid, item)][:, :, 0], interpolation="nearest", cmap=nice_cmap)
    plt.title("%s" % item, fontsize=8, weight='bold')
    i += 1
for item in object_list:
    plt.subplot(2, 8, i, xticks=[], yticks=[])
    plt.imshow(subtract_mean_all_3d_fig["%s_even_%s" % (subid, item)][:, :, 0], interpolation="nearest", cmap=nice_cmap)
    i += 1
plt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.2, bottom=0.05, top=0.9)
# label the figure:
fig.text(0.03, 0.625, 'Odd runs', ha='left', weight='bold')
fig.text(0.03, 0.225, 'Even runs', ha='left', weight='bold')
fig.text(0.16, 0.93, 'Brain images after subtracting mean for %s' % subid, fontsize=16, weight='bold')
plt.savefig(figure_path + "subtract_mean_odd_even_compile_%s.png" % subid)
plt.close()
# Run correlation:
# odd-vs-even correlation on the mean-subtracted data
subtract_mean_all_results = []
print ("correlation analysis of subtracted results:")
for i in odd_runs:
    result = []
    for j in even_runs:
        corr = np.corrcoef(subtract_mean_result[i], subtract_mean_result[j])
        result.append(corr[0, 1])
        print ("%s vs %s: %.4f" % (i, j, corr[0, 1]))
    subtract_mean_all_results.append(result)
subtract_mean_table_result = np.array(subtract_mean_all_results)
# make table to display the correlation:
fig = plt.figure(figsize=(8, 4))
plt.subplot(111, frameon=False, xticks=[], yticks=[])
table = plt.table(cellText=subtract_mean_table_result.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')
plt.subplots_adjust(left=0.3, bottom=0, top=0.95)
fig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)
fig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)
fig.text(0.3, 0.85, "Correlation of mean subtracted brain images of %s" % subid, weight='bold')
table.scale(1.2, 1.2)
plt.savefig(figure_path + "subtract_mean_correlation_table_%s.png" % subid)
plt.close()
print (separator)
################################## Start 3D analysis ######################################
print ("Advanced correlation analysis with 3D results:")
print (separator)
print ("")
# load even and odd run results
# "_3d" files hold 5 z-slices per run, reshaped to (rows, 25, 5) below
all_runs_3d = {}
for i in odd_runs:
    all_runs_3d[i] = np.loadtxt(file_path + i + "_3d.txt")
for i in even_runs:
    all_runs_3d[i] = np.loadtxt(file_path + i + "_3d.txt")
# reshape to 3d images
all_3d_for3d = {}
for key, txt in all_runs_3d.items():
    all_3d_for3d[key] = np.reshape(txt, (-1, 25, 5))
# make a copy of the images for making figures:
all_3d_for3d_fig = copy.deepcopy(all_3d_for3d)
# clear background
for key, fig in all_3d_for3d_fig.items():
    fig[fig == 0] = np.nan
# save all 3d images as one compiled figure for each z
# layout: 16 rows (8 objects x odd/even) x 5 columns (z-slices)
fig = plt.figure(figsize=[8.0, 20])
i = 1
for item in object_list:
    for index in range(5):
        plt.subplot(16, 5, i, xticks=[], yticks=[])
        plt.imshow(all_3d_for3d_fig["%s_odd_%s" % (subid, item)][:, :, index], interpolation="nearest", cmap=nice_cmap)
        if index == 2:
            plt.title("Odd Run %s" % item, fontsize=8, weight='bold')
        i += 1
    for index in range(5):
        plt.subplot(16, 5, i, xticks=[], yticks=[])
        plt.imshow(all_3d_for3d_fig["%s_even_%s" % (subid, item)][:, :, index], interpolation="nearest", cmap=nice_cmap)
        if index == 2:
            plt.title("Even Run %s" % item, fontsize=8, weight='bold')
        i += 1
plt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.5, bottom=0.05, top=0.835)
# label the figure:
fig.text(0.25, 0.85, 'Average brain 3D images for odd runs / even runs of %s' % subid, fontsize=16, weight='bold')
plt.savefig(figure_path + "3d_odd_even_compile_%s.png" % subid)
plt.close()
# Run correlation:
# ravel so np.corrcoef sees each run as a single flat variable
all_results_3d = []
print ("3D correlation analysis:")
for i in odd_runs:
    result = []
    for j in even_runs:
        corr = np.corrcoef(np.ravel(all_runs_3d[i]), np.ravel(all_runs_3d[j]))
        result.append(corr[0, 1])
        print ("%s vs %s: %.4f" % (i, j, corr[0, 1]))
    all_results_3d.append(result)
table_result_3d = np.array(all_results_3d)
np.savetxt(file_path + "3d_correlation_value_%s.txt" % subid, np.ravel(table_result_3d))
# make table to display the correlation:
fig = plt.figure(figsize=(8, 4))
plt.subplot(111, frameon=False, xticks=[], yticks=[])
table = plt.table(cellText=table_result_3d.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')
plt.subplots_adjust(left=0.3, bottom=0, top=0.95)
fig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)
fig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)
fig.text(0.2, 0.85, "Correlation between 3D odd runs and even runs for %s" % subid, weight='bold')
table.scale(1.2, 1.2)
plt.savefig(figure_path + "3d_correlation_table_%s.png" % subid)
#########################################################################################
# remove the maximally responded 3D area and perform the correlation once again:
# create a copy of data to work on this analysis:
new_all_runs_for3d = copy.deepcopy(all_runs_3d)
# Zero out voxels at or above the `percent` (80th) percentile. This line
# previously used a hard-coded q=90, contradicting both the 2D analysis
# above and the "removing 80% max" figure labels; use the shared constant.
for key, result in new_all_runs_for3d.items():
    thresh = np.percentile(result, q=percent)
    nparray = np.array(result)
    nparray[nparray >= thresh] = 0
    new_all_runs_for3d[key] = nparray
# reshape the new_all_runs:
new_all_3d_for3d = {}
for key, txt in new_all_runs_for3d.items():
    new_all_3d_for3d[key] = np.reshape(txt, (-1, 25, 5))
# make a copy of the images for making figures:
new_all_3d_fig_for3d = copy.deepcopy(new_all_3d_for3d)
# clear the background
for key, fig in new_all_3d_fig_for3d.items():
    fig[fig == 0] = np.nan
# save all 3d images as one compiled figure
# odd runs first (8 rows), then even runs (8 rows); 5 z-slices per row
fig = plt.figure(figsize=[8.0, 12.0])
i = 1
for item in object_list:
    for index in range(5):
        plt.subplot(16, 5, i, xticks=[], yticks=[])
        plt.imshow(new_all_3d_fig_for3d["%s_odd_%s" % (subid, item)][:, :, index], interpolation="nearest", cmap=nice_cmap)
        if index == 2:
            plt.title("%s" % item, fontsize=8, weight='bold')
        i += 1
for item in object_list:
    for index in range(5):
        plt.subplot(16, 5, i, xticks=[], yticks=[])
        plt.imshow(new_all_3d_fig_for3d["%s_even_%s" % (subid, item)][:, :, index], interpolation="nearest", cmap=nice_cmap)
        if index == 2:
            plt.title("%s" % item, fontsize=8, weight='bold')
        i += 1
plt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.1, bottom=0.05, top=0.835)
# label the figure:
fig.text(0.2, 0.85, "Average 3D brain images after removing 80%% max for %s" % subid, weight='bold')
plt.savefig(figure_path + "3d_non_max_odd_even_compile_%s.png" % subid)
plt.close()
# Run correlation:
non_max_all_results_for3d = []
print ("correlation analysis of 3D non-maximal results:")
for i in odd_runs:
    result = []
    for j in even_runs:
        # ravel to 1D so np.corrcoef treats each run as one variable,
        # matching the full 3D correlation above; without ravel a 2D
        # array is interpreted row-wise and corr[0, 1] would compare
        # only the first two rows.
        corr = np.corrcoef(np.ravel(new_all_runs_for3d[i]),
                           np.ravel(new_all_runs_for3d[j]))
        result.append(corr[0, 1])
        print ("%s vs %s: %.4f" % (i, j, corr[0, 1]))
    non_max_all_results_for3d.append(result)
non_max_table_result_for3d = np.array(non_max_all_results_for3d)
np.savetxt(file_path + "3d_non_max_correlation_value_%s.txt" % subid, np.ravel(non_max_table_result_for3d))
# make table to display the correlation:
fig = plt.figure(figsize=(8, 4))
plt.subplot(111, frameon=False, xticks=[], yticks=[])
table = plt.table(cellText=non_max_table_result_for3d.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')
plt.subplots_adjust(left=0.3, bottom=0, top=0.95)
fig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)
fig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)
fig.text(0.16, 0.85, "3D Correlation of non_maximal responded brain of %s" % subid, weight='bold')
table.scale(1.2, 1.2)
plt.savefig(figure_path + "3d_non_max_correlation_table_%s.png" % subid)
plt.close()
# generate bar plot
# one panel per object, comparing full vs non-max 3D correlations
ind = np.arange(8)
width = 0.35
fig = plt.figure(figsize=(10, 24))
# subplot indices are 1-based; the original loop used range(8), so the
# first iteration requested subplot index 0 (invalid) with row index -1.
for plot_num in range(1, 9):
    i = plot_num - 1
    ax = plt.subplot(8, 1, plot_num, frameon=False)
    bar_plot1 = ax.bar(ind, table_result_3d[i, :], width, color='darkgoldenrod')
    bar_plot2 = ax.bar(ind+width, non_max_table_result_for3d[i, :], width, color='tan')
    # add some label:
    ax.set_ylabel("Correlation")
    ax.set_title("%s" % object_list[i])
    ax.set_xticks(ind+width)
    ax.set_xticklabels(object_list)
    ax.set_yticks([-0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.axhline(0, color='black', linewidth=2)
    ax.axvline(0, color='black', linewidth=2)
plt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.5, bottom=0.05, top=0.95)
plt.savefig(figure_path + "%s_3d_total_correlation_bar_both.png" % subid)
# generate individual bar plot
# one figure per object, saved separately
ind = np.arange(8)
width = 0.35
fig = plt.figure(figsize=(12, 5))
for i in range(8):
    ax = plt.subplot(111, frameon=False)
    bar_plot1 = ax.bar(ind, table_result_3d[i, :], width, color='darkgoldenrod')
    bar_plot2 = ax.bar(ind+width, non_max_table_result_for3d[i, :], width, color='tan')
    # add some label:
    ax.set_ylabel("Correlation")
    ax.set_title("%s" % object_list[i])
    ax.set_xticks(ind+width)
    ax.set_xticklabels(object_list)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_yticks([-0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
    ax.axhline(0, color='black', linewidth=2)
    ax.axvline(0, color='black', linewidth=2)
    ax.legend((bar_plot1[0], bar_plot2[0]), ('All responding voxels', 'Excluding max responded voxels'), bbox_to_anchor=(0.7, 1.06), loc=2, borderaxespad=0., fontsize=12)
    plt.savefig(figure_path + "%s_3d_%s_total_correlation_bar_both.png" % (subid, object_list[i]))
    plt.clf()
plt.close()
############################# subtract mean and do the correlation for 3D ##################################
# subtract the mean and try the correlation:
# create a copy of data to work on this analysis:
subtrmean_all_runs_3d = copy.deepcopy(all_runs_3d)
# compute the grand mean over all 16 3D runs and subtract it from every
# run before correlating
total_mean_3d = np.zeros_like(subtrmean_all_runs_3d["%s_odd_face" % subid])
total_num_3d = 0
for key, result in subtrmean_all_runs_3d.items():
    nparray = np.array(result)
    total_mean_3d += nparray
    total_num_3d += 1
total_mean_3d = total_mean_3d/total_num_3d
subtract_mean_result_for3d = {}
for key, result in subtrmean_all_runs_3d.items():
    nparray = np.array(result)
    subtract_mean_result_for3d[key] = nparray - total_mean_3d
# reshape the subtract_mean_result:
subtract_mean_all_3d_for3d = {}
for key, txt in subtract_mean_result_for3d.items():
    subtract_mean_all_3d_for3d[key] = np.reshape(txt, (-1, 25, 5))
# make a copy of the images for making figures:
subtract_mean_all_3d_fig_for3d = copy.deepcopy(subtract_mean_all_3d_for3d)
# clear the background
for key, fig in subtract_mean_all_3d_fig_for3d.items():
    fig[fig == 0] = np.nan
# save all 3d images as one compiled figure
fig = plt.figure(figsize=[8.0, 12.0])
i = 1
for item in object_list:
    for index in range(5):
        plt.subplot(16, 5, i, xticks=[], yticks=[])
        plt.imshow(subtract_mean_all_3d_fig_for3d["%s_odd_%s" % (subid, item)][:, :, index], interpolation="nearest", cmap=nice_cmap)
        if index == 2:
            plt.title("%s" % item, fontsize=8, weight='bold')
        i += 1
for item in object_list:
    for index in range (5):
        plt.subplot(16, 5, i, xticks=[], yticks=[])
        plt.imshow(subtract_mean_all_3d_fig_for3d["%s_even_%s" % (subid, item)][:, :, index], interpolation="nearest", cmap=nice_cmap)
        if index == 2:
            plt.title("%s" % item, fontsize=8, weight='bold')
        i += 1
plt.subplots_adjust(left=0.15, wspace=0.2, hspace=0.1, bottom=0.05, top=0.835)
# label the figure:
# fig.text(0.03, 0.625, 'Odd runs', ha='left', weight='bold')
# fig.text(0.03, 0.225, 'Even runs', ha='left', weight='bold')
fig.text(0.16, 0.93, '3D Brain images after subtracting mean for %s' % subid, fontsize=16, weight='bold')
plt.savefig(figure_path + "3d_subtract_mean_odd_even_compile_%s.png" % subid)
plt.close()
# Run correlation:
subtract_mean_all_results_for3d = []
print ("correlation analysis of subtracted results:")
for i in odd_runs:
    result = []
    for j in even_runs:
        # ravel for consistency with the other 3D correlations: each run
        # must be a single flat variable, otherwise corr[0, 1] would only
        # compare the first two rows of a 2D array.
        corr = np.corrcoef(np.ravel(subtract_mean_result_for3d[i]),
                           np.ravel(subtract_mean_result_for3d[j]))
        result.append(corr[0, 1])
        print ("%s vs %s: %.4f" % (i, j, corr[0, 1]))
    subtract_mean_all_results_for3d.append(result)
subtract_mean_table_result_for3d = np.array(subtract_mean_all_results_for3d)
# make table to display the correlation:
fig = plt.figure(figsize=(8, 4))
plt.subplot(111, frameon=False, xticks=[], yticks=[])
table = plt.table(cellText=subtract_mean_table_result_for3d.round(4), colLabels=object_list, rowLabels=object_list, loc='center', cellLoc='center')
plt.subplots_adjust(left=0.3, bottom=0, top=0.95)
fig.text(0.55, 0.75, 'Odd runs', ha='left', fontsize=12)
fig.text(0.05, 0.52, 'Even runs', ha='left', rotation=90, fontsize=12)
fig.text(0.3, 0.85, "3D Correlation of mean subtracted brain images of %s" % subid, weight='bold')
table.scale(1.2, 1.2)
plt.savefig(figure_path + "3d_subtract_mean_correlation_table_%s.png" % subid)
plt.close()
print (separator)
print ("Complete!!!") | bsd-3-clause |
arjoly/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 3 | 37569 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
import scipy
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import compute_class_weight
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
# Tiny linearly separable fixtures shared by the tests below,
# in dense and sparse form, with binary (Y1) and 3-class (Y2) targets.
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
# NOTE(review): assumes a purely numeric version string; a dev/rc suffix
# (e.g. '1.0.0rc1') would make int() raise -- confirm acceptable here.
sp_version = tuple([int(s) for s in scipy.__version__.split('.')])
def check_predictions(clf, X, y):
    """Check that the model is able to fit the classification data"""
    expected_classes = np.unique(y)
    n_samples = len(y)
    n_classes = expected_classes.shape[0]

    # Fitting must recover the class labels and reproduce the targets.
    predicted = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, expected_classes)
    assert_equal(predicted.shape, (n_samples,))
    assert_array_equal(predicted, y)

    # Probabilities must be normalized and consistent with the predictions.
    probabilities = clf.predict_proba(X)
    assert_equal(probabilities.shape, (n_samples, n_classes))
    assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
    assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
    # Simple sanity check on a 2 classes dataset:
    # the same configurations must fit both dense and sparse input.
    for data in (X, X_sp):
        check_predictions(LogisticRegression(random_state=0), data, Y1)
        check_predictions(LogisticRegression(C=100, random_state=0), data, Y1)
        check_predictions(
            LogisticRegression(fit_intercept=False, random_state=0), data, Y1)
def test_error():
    # Test for appropriate exception on errors
    # Invalid hyperparameters must raise ValueError with a clear message,
    # for both the plain and cross-validated estimators.
    msg = "Penalty term must be positive"
    assert_raise_message(ValueError, msg,
                         LogisticRegression(C=-1).fit, X, Y1)
    assert_raise_message(ValueError, msg,
                         LogisticRegression(C="test").fit, X, Y1)

    for LR in [LogisticRegression, LogisticRegressionCV]:
        msg = "Tolerance for stopping criteria must be positive"
        assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
        assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)

        msg = "Maximum number of iteration must be positive"
        assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
        assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
    # The 3-class problem must be solvable on dense and sparse input alike.
    for data in (X, X_sp):
        check_predictions(LogisticRegression(C=10), data, Y2)
def test_predict_iris():
    # Test logistic regression with the iris dataset
    n_samples, n_features = iris.data.shape

    target = iris.target_names[iris.target]

    # Test that both multinomial and OvR solvers handle
    # multiclass data correctly and give good accuracy
    # score (>0.95) for the training data.
    for clf in [LogisticRegression(C=len(iris.data)),
                LogisticRegression(C=len(iris.data), solver='lbfgs',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='newton-cg',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
                                   multi_class='ovr', random_state=42)]:
        clf.fit(iris.data, target)
        assert_array_equal(np.unique(target), clf.classes_)

        pred = clf.predict(iris.data)
        assert_greater(np.mean(pred == target), .95)

        # probabilities must sum to one per sample and their argmax must
        # agree with the (accurate) hard predictions
        probabilities = clf.predict_proba(iris.data)
        assert_array_almost_equal(probabilities.sum(axis=1),
                                  np.ones(n_samples))

        pred = iris.target_names[probabilities.argmax(axis=1)]
        assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
    # A negative C must be rejected by the multinomial solvers as well.
    for solver in ['lbfgs', 'newton-cg']:
        lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
        assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
    # Unsupported solver / multi_class / penalty / dual combinations must
    # raise ValueError with an informative message.
    X, y = iris.data, iris.target
    for LR in [LogisticRegression, LogisticRegressionCV]:

        msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
               " and sag solvers, got wrong_name")
        lr = LR(solver="wrong_name")
        assert_raise_message(ValueError, msg, lr.fit, X, y)

        msg = "multi_class should be either multinomial or ovr, got wrong_name"
        lr = LR(solver='newton-cg', multi_class="wrong_name")
        assert_raise_message(ValueError, msg, lr.fit, X, y)

        # all solver except 'newton-cg' and 'lfbgs'
        for solver in ['liblinear', 'sag']:
            msg = ("Solver %s does not support a multinomial backend." %
                   solver)
            lr = LR(solver=solver, multi_class='multinomial')
            assert_raise_message(ValueError, msg, lr.fit, X, y)

        # all solvers except 'liblinear'
        for solver in ['newton-cg', 'lbfgs', 'sag']:
            msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
                   solver)
            lr = LR(solver=solver, penalty='l1')
            assert_raise_message(ValueError, msg, lr.fit, X, y)

            msg = ("Solver %s supports only dual=False, got dual=True" %
                   solver)
            lr = LR(solver=solver, dual=True)
            assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
    # Test multinomial LR on a binary problem.
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]

    for solver in ['lbfgs', 'newton-cg']:
        clf = LogisticRegression(solver=solver, multi_class='multinomial')
        clf.fit(iris.data, target)

        # Binary multinomial collapses to a single coefficient row.
        assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_array_equal(clf.predict(iris.data), target)

        mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                                 fit_intercept=False)
        mlr.fit(iris.data, target)
        # Bug fix: score the intercept-free model `mlr` rather than re-using
        # `clf`, which was already checked above and left `mlr` untested.
        pred = mlr.classes_[np.argmax(mlr.predict_log_proba(iris.data),
                                      axis=1)]
        assert_greater(np.mean(pred == target), .9)
def test_sparsify():
    # Test sparsify and densify members.
    # Decision function values must be identical across all four
    # combinations of {dense, sparse} coefficients x {dense, sparse} data.
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    clf = LogisticRegression(random_state=0).fit(iris.data, target)

    pred_d_d = clf.decision_function(iris.data)

    clf.sparsify()
    assert_true(sp.issparse(clf.coef_))
    pred_s_d = clf.decision_function(iris.data)

    sp_data = sp.coo_matrix(iris.data)
    pred_s_s = clf.decision_function(sp_data)

    clf.densify()
    pred_d_s = clf.decision_function(sp_data)

    assert_array_almost_equal(pred_d_d, pred_s_d)
    assert_array_almost_equal(pred_d_d, pred_s_s)
    assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
    # Test that an exception is raised on inconsistent input
    rng = np.random.RandomState(0)
    X_ = rng.random_sample((5, 10))
    y_ = np.ones(X_.shape[0])
    y_[0] = 0

    clf = LogisticRegression(random_state=0)

    # Wrong dimensions for training data
    y_wrong = y_[:-1]
    assert_raises(ValueError, clf.fit, X, y_wrong)

    # Wrong dimensions for test data
    # (trained on 10 features, predicting on 12 must fail)
    assert_raises(ValueError, clf.fit(X_, y_).predict,
                  rng.random_sample((3, 12)))
def test_write_parameters():
    """Test that we can write to coef_ and intercept_ after fitting."""
    clf = LogisticRegression(random_state=0)
    clf.fit(X, Y1)
    # In-place assignment must be accepted by the fitted attributes.
    clf.coef_[:] = 0
    clf.intercept_[:] = 0
    # With all-zero parameters every decision value is exactly 0.
    assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
    """Test proper NaN handling.

    Regression test for Issue #252: fit used to go into an infinite loop.
    A ValueError is expected (enforced by the @raises decorator).
    """
    Xnan = np.array(X, dtype=np.float64)
    Xnan[0, 1] = np.nan
    LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
    """Test that logistic_regression_path matches LogisticRegression per C."""
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = np.logspace(0, 4, 10)

    f = ignore_warnings
    # can't test with fit_intercept=True since LIBLINEAR
    # penalizes the intercept
    for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
        coefs, Cs, _ = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
            random_state=0)
        for i, C in enumerate(Cs):
            lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
                                    random_state=0)
            lr.fit(X, y)
            lr_coef = lr.coef_.ravel()
            assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
                                      err_msg="with solver = %s" % solver)

    # test for fit_intercept=True
    for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
        Cs = [1e3]
        coefs, Cs, _ = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
            intercept_scaling=10000., random_state=0)
        lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
                                intercept_scaling=10000., random_state=0)
        lr.fit(X, y)
        # Path coefs carry the intercept as the last entry in this case.
        lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
        assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
                                  err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
    """random_state is relevant for liblinear solver only if dual=True."""
    X, y = make_classification(n_samples=20)
    # max_iter=1 with a tiny tol keeps the solution random_state-dependent.
    lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
    lr1.fit(X, y)
    lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
    lr2.fit(X, y)
    lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
    lr3.fit(X, y)

    # same result for same random state
    assert_array_almost_equal(lr1.coef_, lr2.coef_)
    # different results for different random states
    msg = "Arrays are not almost equal to 6 decimals"
    assert_raise_message(AssertionError, msg,
                         assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
    """_logistic_loss_and_grad's gradient must match finite differences."""
    X_ref, y = make_classification(n_samples=20)
    n_features = X_ref.shape[1]

    # Also exercise the sparse code path with a thresholded copy.
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)

        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)

        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        # At w == 0 the intercept contributes nothing, so the loss with an
        # intercept slot equals the intercept-free loss above.
        assert_array_almost_equal(loss, loss_interp)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
    """Check _logistic_grad_hess against _logistic_loss_and_grad and a
    finite-difference estimate of one Hessian column."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 50, 5
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    # Also exercise the sparse code path.
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = .1 * np.ones(n_features)

        # First check that _logistic_grad_hess is consistent
        # with _logistic_loss_and_grad
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(grad, grad_2)

        # Now check our hessian along the second direction of the grad
        vector = np.zeros_like(grad)
        vector[1] = 1
        hess_col = hess(vector)

        # Computation of the Hessian is particularly fragile to numerical
        # errors when doing simple finite differences. Here we compute the
        # grad along a path in the direction of the vector and then use a
        # least-square regression to estimate the slope
        e = 1e-3
        d_x = np.linspace(-e, e, 30)
        d_grad = np.array([
            _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
            for t in d_x
        ])
        d_grad -= d_grad.mean(axis=0)
        approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
        assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)

    # Second check that our intercept implementation is good
    w = np.zeros(n_features + 1)
    loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
    loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
    grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
    assert_array_almost_equal(loss_interp, loss_interp_2)
    assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    """Test the LogisticRegressionCV object with a single C value.

    With Cs=[1.] the CV estimator must reproduce a plain
    LogisticRegression(C=1.), and its CV bookkeeping attributes
    (coefs_paths_, Cs_, scores_) must have the documented shapes.
    """
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                 solver='liblinear')
    lr_cv.fit(X_ref, y)
    lr = LogisticRegression(C=1., fit_intercept=False)
    lr.fit(X_ref, y)
    assert_array_almost_equal(lr.coef_, lr_cv.coef_)

    assert_array_equal(lr_cv.coef_.shape, (1, n_features))
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert_equal(len(lr_cv.classes_), 2)

    # Shapes: (n_classes, n_folds, n_Cs, n_features) and (n_classes, n_folds, n_Cs).
    coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
    assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
    assert_array_equal(lr_cv.Cs_.shape, (1, ))
    scores = np.asarray(list(lr_cv.scores_.values()))
    assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
    """LogisticRegressionCV must give the same fit on dense and sparse X."""
    X, y = make_classification(n_samples=50, n_features=5,
                               random_state=0)
    X[X < 1.0] = 0.0
    csr = sp.csr_matrix(X)

    clf = LogisticRegressionCV(fit_intercept=True)
    clf.fit(X, y)
    clfs = LogisticRegressionCV(fit_intercept=True)
    clfs.fit(csr, y)
    assert_array_almost_equal(clfs.coef_, clf.coef_)
    assert_array_almost_equal(clfs.intercept_, clf.intercept_)
    assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
    """Fitting an intercept must equal appending a constant-one feature,
    up to the penalty applied to that extra feature."""
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)

    # Fit intercept case.
    alpha = 1.
    w = np.ones(n_features + 1)
    grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
    loss_interp = _logistic_loss(w, X, y, alpha)

    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e column of one vectors.
    X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
    grad, hess = _logistic_grad_hess(w, X_, y, alpha)
    loss = _logistic_loss(w, X_, y, alpha)

    # In the fit_intercept=False case, the feature vector of ones is
    # penalized. This should be taken care of.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)

    # Check gradient.
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])

    rng = np.random.RandomState(0)
    grad = rng.rand(n_features + 1)
    hess_interp = hess_interp(grad)
    hess = hess(grad)
    # Same relationship holds for the Hessian-vector products.
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
    """Test that OvR and multinomial are correct using the iris dataset."""
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape

    # Use pre-defined fold as folds generated for different y
    cv = StratifiedKFold(target, 3)
    clf = LogisticRegressionCV(cv=cv)
    clf.fit(train, target)

    # Merging classes 0 and 1 must leave the class-2 estimator unchanged.
    clf1 = LogisticRegressionCV(cv=cv)
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)

    assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
    assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
    assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)

    # Test the shape of various attributes.
    assert_equal(clf.coef_.shape, (3, n_features))
    assert_array_equal(clf.classes_, [0, 1, 2])
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
    assert_equal(clf.Cs_.shape, (10, ))
    scores = np.asarray(list(clf.scores_.values()))
    assert_equal(scores.shape, (3, 3, 10))

    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg']:
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=15
        )
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert_greater(multi_score, ovr_score)

        # Test attributes of LogisticRegressionCV
        assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
                                                      n_features + 1))
        assert_equal(clf_multi.Cs_.shape, (10, ))
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
    """All binary solvers must converge to (almost) the same coefficients."""
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)
    ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
    lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
    lib = LogisticRegression(fit_intercept=False)
    sag = LogisticRegression(solver='sag', fit_intercept=False,
                             random_state=42)
    solvers = (ncg, lbf, lib, sag)
    for clf in solvers:
        clf.fit(X, y)
    # Every pair of solvers must agree to 3 decimals.
    for i, clf_a in enumerate(solvers):
        for clf_b in solvers[i + 1:]:
            assert_array_almost_equal(clf_a.coef_, clf_b.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
    """All solvers must agree on a 3-class problem as well."""
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    tol = 1e-6
    ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
    lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
    lib = LogisticRegression(fit_intercept=False, tol=tol)
    sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
                             max_iter=1000, random_state=42)
    solvers = (ncg, lbf, lib, sag)
    for clf in solvers:
        clf.fit(X, y)
    # Every pair of solvers must agree to 4 decimals.
    for i, clf_a in enumerate(solvers):
        for clf_b in solvers[i + 1:]:
            assert_array_almost_equal(clf_a.coef_, clf_b.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
    """liblinear must reject dict class_weight on multiclass; all solvers
    must agree under class_weight='balanced'."""
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    msg = ("In LogisticRegressionCV the liblinear solver cannot handle "
           "multiclass with class_weight of type dict. Use the lbfgs, "
           "newton-cg or sag solvers or set class_weight='balanced'")
    clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
                                   solver='liblinear')
    assert_raise_message(ValueError, msg, clf_lib.fit, X, y)
    # After collapsing to a binary target, a dict class_weight is fine.
    y_ = y.copy()
    y_[y == 2] = 1
    clf_lib.fit(X, y_)
    assert_array_equal(clf_lib.classes_, [0, 1])

    # Test for class_weight=balanced
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               random_state=0)
    clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
                                   class_weight='balanced')
    clf_lbf.fit(X, y)
    clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
                                   class_weight='balanced')
    clf_lib.fit(X, y)
    clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
                                   class_weight='balanced', max_iter=2000)
    clf_sag.fit(X, y)
    assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
    """Check sample_weight semantics for LogisticRegression(CV)."""
    X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
                               n_classes=2, random_state=0)

    for LR in [LogisticRegression, LogisticRegressionCV]:

        # Test that liblinear fails when sample weights are provided
        clf_lib = LR(solver='liblinear')
        assert_raises(ValueError, clf_lib.fit, X, y,
                      sample_weight=np.ones(y.shape[0]))

        # Test that passing sample_weight as ones is the same as
        # not passing them at all (default None)
        clf_sw_none = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_none.fit(X, y)
        clf_sw_ones = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
        assert_array_almost_equal(clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)

        # Test that sample weights work the same with the lbfgs,
        # newton-cg, and 'sag' solvers
        clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_lbfgs.fit(X, y, sample_weight=y + 1)
        clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
        clf_sw_n.fit(X, y, sample_weight=y + 1)
        clf_sw_sag = LR(solver='sag', fit_intercept=False,
                        max_iter=2000, tol=1e-7)
        clf_sw_sag.fit(X, y, sample_weight=y + 1)
        assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
        assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)

        # Test that passing class_weight as [1,2] is the same as
        # passing class weight = [1,1] but adjusting sample weights
        # to be 2 for all instances of class 2
        clf_cw_12 = LR(solver='lbfgs', fit_intercept=False,
                       class_weight={0: 1, 1: 2})
        clf_cw_12.fit(X, y)
        sample_weight = np.ones(y.shape[0])
        sample_weight[y == 1] = 2
        clf_sw_12 = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_12.fit(X, y, sample_weight=sample_weight)
        assert_array_almost_equal(clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
    """Return compute_class_weight('balanced', ...) as a {class: weight} dict."""
    classes = np.unique(y)
    weights = compute_class_weight("balanced", classes, y)
    return {cls: w for cls, w in zip(classes, weights)}
def test_logistic_regression_class_weights():
    """class_weight='balanced' must equal the explicit balanced dict."""
    # Multinomial case: remove 90% of class 0
    X = iris.data[45:, :]
    y = iris.target[45:]
    solvers = ("lbfgs", "newton-cg")
    class_weight_dict = _compute_class_weight_dictionary(y)

    for solver in solvers:
        clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
                                  class_weight="balanced")
        clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
                                  class_weight=class_weight_dict)
        clf1.fit(X, y)
        clf2.fit(X, y)
        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)

    # Binary case: remove 90% of class 0 and 100% of class 2
    X = iris.data[45:100, :]
    y = iris.target[45:100]
    solvers = ("lbfgs", "newton-cg", "liblinear")
    class_weight_dict = _compute_class_weight_dictionary(y)

    for solver in solvers:
        clf1 = LogisticRegression(solver=solver, multi_class="ovr",
                                  class_weight="balanced")
        clf2 = LogisticRegression(solver=solver, multi_class="ovr",
                                  class_weight=class_weight_dict)
        clf1.fit(X, y)
        clf2.fit(X, y)
        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_multinomial_logistic_regression_with_classweight_auto():
    """class_weight='auto' must emit a DeprecationWarning on fit."""
    X, y = iris.data, iris.target
    model = LogisticRegression(multi_class='multinomial',
                               class_weight='auto', solver='lbfgs')
    # 'auto' is deprecated and will be removed in 0.19
    assert_warns_message(DeprecationWarning,
                         "class_weight='auto' heuristic is deprecated",
                         model.fit, X, y)
def test_logistic_regression_convergence_warnings():
    """Test that warnings are raised if model does not converge."""
    X, y = make_classification(n_samples=20, n_features=20)
    # max_iter=2 is too few iterations to converge, so a warning is expected.
    clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
    assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
    """Tests for the multinomial option in logistic regression."""
    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)
    clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    clf_int.fit(X, y)
    assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))

    clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
                                  fit_intercept=False)
    clf_wint.fit(X, y)
    assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))

    # Similar tests for newton-cg solver option
    clf_ncg_int = LogisticRegression(solver='newton-cg',
                                     multi_class='multinomial')
    clf_ncg_int.fit(X, y)
    assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))

    clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
                                      multi_class='multinomial')
    clf_ncg_wint.fit(X, y)
    assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))

    # Compare solutions between lbfgs and newton-cg
    assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
    assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
    assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)

    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg']:
        clf_path = LogisticRegressionCV(solver=solver,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
    """Check _multinomial_grad_hess's Hessian-vector product against a
    finite-difference slope estimate (same technique as
    test_logistic_grad_hess)."""
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    # One-hot targets chosen as the argmax of X.dot(w.T).
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
                                         sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)

    # Estimate hessian using least squares as done in
    # test_logistic_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
                               sample_weight=sample_weights)[0]
        for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    """Test negative prediction when decision_function values are zero.

    Liblinear predicts the positive class when decision_function values
    are zero. This is a test to verify that we do not do the same.
    See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
    and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
    """
    X, y = make_classification(n_samples=5, n_features=5)
    clf = LogisticRegression(fit_intercept=False)
    clf.fit(X, y)

    # Dummy data such that the decision function becomes zero.
    X = np.zeros((5, 5))
    assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
    """Test LogRegCV with solver='liblinear' works for sparse matrices."""
    X, y = make_classification(n_samples=10, n_features=5)
    clf = LogisticRegressionCV(solver='liblinear')
    # Fitting on CSR input must not raise.
    clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
    """Test that the right error message is thrown when intercept_scaling <= 0."""
    for i in [-1, 0]:
        clf = LogisticRegression(intercept_scaling=i)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % clf.intercept_scaling)
        assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
    """Test that intercept_scaling is ignored when fit_intercept is False."""
    clf = LogisticRegression(fit_intercept=False)
    clf.fit(X, Y1)
    assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
    """Test that the correct penalty is passed to the final fit."""
    X, y = make_classification(n_samples=50, n_features=20, random_state=0)
    lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
    lr_cv.fit(X, y)
    lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
    lr.fit(X, y)
    # l1 sparsity pattern must match, proving the penalty was forwarded.
    assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
    """Multinomial predict_proba must beat OvR and the per-class sigmoid
    in log-loss on a 3-class problem."""
    X, y = make_classification(n_samples=10, n_features=20, random_state=0,
                               n_classes=3, n_informative=10)

    # Predicted probabilities using the true-entropy loss should give a
    # smaller loss than those using the ovr method.
    clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
    clf_multi.fit(X, y)
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
    clf_ovr.fit(X, y)
    clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
    assert_greater(clf_ovr_loss, clf_multi_loss)

    # Predicted probabilities using the soft-max function should give a
    # smaller loss than those using the logistic function.
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
    assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
    """Test that the maximum number of iteration is reached."""
    X, y_bin = iris.data, iris.target.copy()
    y_bin[y_bin == 2] = 0

    solvers = ['newton-cg', 'liblinear', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')

    for max_iter in range(1, 5):
        for solver in solvers:
            # tol=1e-15 is unreachable, so the solver must stop at max_iter.
            lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
                                    random_state=0, solver=solver)
            lr.fit(X, y_bin)
            assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
    """Test that self.n_iter_ has the correct format.

    Expected shapes: (n_classes,) for LogisticRegression (liblinear
    reports a single entry) and (n_classes, n_cv_fold, n_Cs) for
    LogisticRegressionCV.
    """
    X, y = iris.data, iris.target
    y_bin = y.copy()
    y_bin[y_bin == 2] = 0

    n_Cs = 4
    n_cv_fold = 2

    for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
        # OvR case
        n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
        clf = LogisticRegression(tol=1e-2, multi_class='ovr',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        n_classes = np.unique(y).shape[0]
        clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))

        # multinomial case
        n_classes = 1
        if solver in ('liblinear', 'sag'):
            # BUG FIX: 'break' exited the solver loop when 'liblinear' was
            # reached, so 'sag' and 'lbfgs' were never tested at all.
            # 'continue' only skips the unsupported multinomial part.
            continue

        clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
    """A 1-iteration second fit on same data should give almost same result
    with warm starting, and quite different result without warm starting.
    Warm starting does not work with liblinear solver.
    """
    X, y = iris.data, iris.target

    solvers = ['newton-cg', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')

    for warm_start in [True, False]:
        for fit_intercept in [True, False]:
            for solver in solvers:
                for multi_class in ['ovr', 'multinomial']:
                    if solver == 'sag' and multi_class == 'multinomial':
                        # sag does not support multinomial; skip just this
                        # combination ('continue' instead of 'break' so the
                        # intent is explicit — same effect here since
                        # 'multinomial' is the last value, but robust to
                        # reordering the list).
                        continue
                    clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
                                             warm_start=warm_start,
                                             solver=solver,
                                             random_state=42, max_iter=100,
                                             fit_intercept=fit_intercept)
                    clf.fit(X, y)
                    coef_1 = clf.coef_

                    clf.max_iter = 1
                    with ignore_warnings():
                        clf.fit(X, y)
                    cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
                    msg = ("Warm starting issue with %s solver in %s mode "
                           "with fit_intercept=%s and warm_start=%s"
                           % (solver, multi_class, str(fit_intercept),
                              str(warm_start)))
                    if warm_start:
                        # Warm start: one extra iteration barely moves coefs.
                        assert_greater(2.0, cum_diff, msg)
                    else:
                        # Cold start with max_iter=1: far from convergence.
                        assert_greater(cum_diff, 2.0, msg)
| bsd-3-clause |
chetan51/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/widgets.py | 69 | 40833 | """
GUI Neutral widgets
All of these widgets require you to predefine an Axes instance and
pass that as the first arg. matplotlib doesn't try to be too smart in
layout -- you have to figure out how wide and tall you want your Axes
to be to accommodate your widget.
"""
import numpy as np
from mlab import dist
from patches import Circle, Rectangle
from lines import Line2D
from transforms import blended_transform_factory
class LockDraw:
    """
    Some widgets, like the cursor, draw onto the canvas, and this is not
    desirable under all circumstances, like when the toolbar is in
    zoom-to-rect mode and drawing a rectangle. The module level "lock"
    allows someone to grab the lock and prevent other widgets from
    drawing. Use matplotlib.widgets.lock(someobj) to prevent other
    widgets from drawing while someobj holds the lock.
    """
    def __init__(self):
        # Current lock holder; None means the lock is free.
        self._owner = None

    def __call__(self, o):
        'reserve the lock for o'
        if not self.available(o):
            raise ValueError('already locked')
        self._owner = o

    def release(self, o):
        'release the lock'
        if not self.available(o):
            raise ValueError('you do not own this lock')
        self._owner = None

    def available(self, o):
        'drawing is available to o'
        return not self.locked() or self.isowner(o)

    def isowner(self, o):
        'o owns the lock'
        # Identity check: only the exact same object may release.
        return self._owner is o

    def locked(self):
        'the lock is held'
        return self._owner is not None
class Widget:
    """
    Abstract base class for mpl GUI-neutral widgets.

    Subclasses check these flags before drawing or firing callbacks.
    """
    # When False, widgets suppress canvas redraws.
    drawon = True
    # When False, widgets suppress observer callbacks.
    eventson = True
class Button(Widget):
    """
    A GUI neutral button

    The following attributes are accessible

      ax         - the Axes the button renders into
      label      - a text.Text instance
      color      - the color of the button when not hovering
      hovercolor - the color of the button when hovering

    Call "on_clicked" to connect to the button
    """
    def __init__(self, ax, label, image=None,
                 color='0.85', hovercolor='0.95'):
        """
        ax is the Axes instance the button will be placed into

        label is a string which is the button text

        image if not None, is an image to place in the button -- can
          be any legal arg to imshow (numpy array, matplotlib Image
          instance, or PIL image)

        color is the color of the button when not activated

        hovercolor is the color of the button when the mouse is over
          it
        """
        if image is not None:
            ax.imshow(image)
        self.label = ax.text(0.5, 0.5, label,
                             verticalalignment='center',
                             horizontalalignment='center',
                             transform=ax.transAxes)

        # cnt is the next observer id; observers maps id -> callback.
        self.cnt = 0
        self.observers = {}

        self.ax = ax
        ax.figure.canvas.mpl_connect('button_press_event', self._click)
        ax.figure.canvas.mpl_connect('motion_notify_event', self._motion)
        # Disable pan/zoom inside the button axes.
        ax.set_navigate(False)
        ax.set_axis_bgcolor(color)
        ax.set_xticks([])
        ax.set_yticks([])
        self.color = color
        self.hovercolor = hovercolor

        self._lastcolor = color

    def _click(self, event):
        # Notify all observers when a press lands inside our axes.
        if event.inaxes != self.ax: return
        if not self.eventson: return
        for cid, func in self.observers.items():
            func(event)

    def _motion(self, event):
        # Swap the background color on hover enter/leave; only redraw on
        # an actual color change to avoid needless canvas draws.
        if event.inaxes==self.ax:
            c = self.hovercolor
        else:
            c = self.color
        if c != self._lastcolor:
            self.ax.set_axis_bgcolor(c)
            self._lastcolor = c
            if self.drawon: self.ax.figure.canvas.draw()

    def on_clicked(self, func):
        """
        When the button is clicked, call this func with event

        A connection id is returned which can be used to disconnect
        """
        cid = self.cnt
        self.observers[cid] = func
        self.cnt += 1
        return cid

    def disconnect(self, cid):
        'remove the observer with connection id cid'
        try: del self.observers[cid]
        except KeyError: pass
class Slider(Widget):
    """
    A slider representing a floating point range

    The following attributes are defined
      ax        : the slider axes.Axes instance
      val       : the current slider value
      vline     : a Line2D instance representing the initial value
      poly      : A patch.Polygon instance which is the slider
      valfmt    : the format string for formatting the slider text
      label     : a text.Text instance, the slider label
      closedmin : whether the slider is closed on the minimum
      closedmax : whether the slider is closed on the maximum
      slidermin : another slider - if not None, this slider must be > slidermin
      slidermax : another slider - if not None, this slider must be < slidermax
      dragging  : allow for mouse dragging on slider

    Call on_changed to connect to the slider event
    """
    def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
                 closedmin=True, closedmax=True, slidermin=None, slidermax=None,
                 dragging=True, **kwargs):
        """
        Create a slider from valmin to valmax in axes ax;

        valinit -  the slider initial position

        label - the slider label

        valfmt - used to format the slider value

        closedmin and closedmax - indicate whether the slider interval is closed

        slidermin and slidermax - be used to constrain the value of
          this slider to the values of other sliders.

        additional kwargs are passed on to self.poly which is the
        matplotlib.patches.Rectangle which draws the slider.  See the
        matplotlib.patches.Rectangle documentation for legal property
        names (eg facecolor, edgecolor, alpha, ...)
        """
        self.ax = ax

        self.valmin = valmin
        self.valmax = valmax
        self.val = valinit
        self.valinit = valinit
        # The filled span from valmin to the current value is the "thumb".
        self.poly = ax.axvspan(valmin,valinit,0,1, **kwargs)

        # Red line marking the initial value.
        self.vline = ax.axvline(valinit,0,1, color='r', lw=1)

        self.valfmt=valfmt
        ax.set_yticks([])
        ax.set_xlim((valmin, valmax))
        ax.set_xticks([])
        ax.set_navigate(False)

        ax.figure.canvas.mpl_connect('button_press_event', self._update)
        if dragging:
            ax.figure.canvas.mpl_connect('motion_notify_event', self._update)
        self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
                             verticalalignment='center',
                             horizontalalignment='right')

        self.valtext = ax.text(1.02, 0.5, valfmt%valinit,
                               transform=ax.transAxes,
                               verticalalignment='center',
                               horizontalalignment='left')

        self.cnt = 0
        self.observers = {}

        self.closedmin = closedmin
        self.closedmax = closedmax
        self.slidermin = slidermin
        self.slidermax = slidermax

    def _update(self, event):
        'update the slider position'
        # Only react to left-button events inside the slider axes.
        if event.button !=1: return
        if event.inaxes != self.ax: return
        val = event.xdata
        # Respect open interval ends and linked slider constraints.
        if not self.closedmin and val<=self.valmin: return
        if not self.closedmax and val>=self.valmax: return

        if self.slidermin is not None:
            if val<=self.slidermin.val: return

        if self.slidermax is not None:
            if val>=self.slidermax.val: return

        self.set_val(val)

    def set_val(self, val):
        # Move the right edge of the thumb polygon to the new value.
        xy = self.poly.xy
        xy[-1] = val, 0
        xy[-2] = val, 1
        self.poly.xy = xy
        self.valtext.set_text(self.valfmt%val)
        if self.drawon: self.ax.figure.canvas.draw()
        self.val = val
        if not self.eventson: return
        for cid, func in self.observers.items():
            func(val)

    def on_changed(self, func):
        """
        When the slider value is changed, call this func with the new
        slider position

        A connection id is returned which can be used to disconnect
        """
        cid = self.cnt
        self.observers[cid] = func
        self.cnt += 1
        return cid

    def disconnect(self, cid):
        'remove the observer with connection id cid'
        try: del self.observers[cid]
        except KeyError: pass

    def reset(self):
        "reset the slider to the initial value if needed"
        if (self.val != self.valinit):
            self.set_val(self.valinit)
class CheckButtons(Widget):
    """
    A GUI neutral radio button

    The following attributes are exposed

     ax         - the Axes instance the buttons are in
     labels     - a list of text.Text instances
     lines      - a list of (line1, line2) tuples for the x's in the check boxes.
                  These lines exist for each box, but have set_visible(False) when
                  box is not checked
     rectangles - a list of patch.Rectangle instances

    Connect to the CheckButtons with the on_clicked method
    """
    def __init__(self, ax, labels, actives):
        """
        Add check buttons to axes.Axes instance ax

        labels is a len(buttons) list of labels as strings

        actives is a len(buttons) list of booleans indicating whether
         the button is active
        """
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_navigate(False)

        # Evenly space the boxes vertically inside the axes.
        if len(labels)>1:
            dy = 1./(len(labels)+1)
            ys = np.linspace(1-dy, dy, len(labels))
        else:
            dy = 0.25
            ys = [0.5]

        cnt = 0
        axcolor = ax.get_axis_bgcolor()

        self.labels = []
        self.lines = []
        self.rectangles = []

        lineparams = {'color':'k', 'linewidth':1.25, 'transform':ax.transAxes,
                      'solid_capstyle':'butt'}
        for y, label in zip(ys, labels):
            t = ax.text(0.25, y, label, transform=ax.transAxes,
                        horizontalalignment='left',
                        verticalalignment='center')

            w, h = dy/2., dy/2.
            x, y = 0.05, y-h/2.

            p = Rectangle(xy=(x,y), width=w, height=h,
                          facecolor=axcolor,
                          transform=ax.transAxes)

            # Two crossing lines form the "x"; visibility encodes checked.
            l1 = Line2D([x, x+w], [y+h, y], **lineparams)
            l2 = Line2D([x, x+w], [y, y+h], **lineparams)

            l1.set_visible(actives[cnt])
            l2.set_visible(actives[cnt])
            self.labels.append(t)
            self.rectangles.append(p)
            self.lines.append((l1,l2))
            ax.add_patch(p)
            ax.add_line(l1)
            ax.add_line(l2)
            cnt += 1

        ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
        self.ax = ax

        self.cnt = 0
        self.observers = {}

    def _clicked(self, event):
        # Toggle the box whose label or rectangle contains the click,
        # then notify observers with that label's text.
        if event.button !=1 : return
        if event.inaxes != self.ax: return

        for p,t,lines in zip(self.rectangles, self.labels, self.lines):
            if (t.get_window_extent().contains(event.x, event.y) or
                p.get_window_extent().contains(event.x, event.y) ):
                l1, l2 = lines
                l1.set_visible(not l1.get_visible())
                l2.set_visible(not l2.get_visible())
                thist = t
                break
        else:
            return

        if self.drawon: self.ax.figure.canvas.draw()

        if not self.eventson: return
        for cid, func in self.observers.items():
            func(thist.get_text())

    def on_clicked(self, func):
        """
        When the button is clicked, call this func with button label

        A connection id is returned which can be used to disconnect
        """
        cid = self.cnt
        self.observers[cid] = func
        self.cnt += 1
        return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class RadioButtons(Widget):
"""
A GUI neutral radio button
The following attributes are exposed
ax - the Axes instance the buttons are in
activecolor - the color of the button when clicked
labels - a list of text.Text instances
circles - a list of patch.Circle instances
Connect to the RadioButtons with the on_clicked method
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
active is the index into labels for the button that is active
activecolor is the color of the button when clicked
"""
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt==active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p,t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else: return
for p in self.circles:
if p==inp: color = self.activecolor
else: color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class SubplotTool(Widget):
"""
A tool to adjust to subplot params of fig
"""
def __init__(self, targetfig, toolfig):
"""
targetfig is the figure to adjust
toolfig is the figure to embed the the subplot tool into. If
None, a default pylab figure will be created. If you are
using this from the GUI
"""
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s'%(self.slider.label.get_text(), self.slider.valfmt)
return fmt%x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left', 0, 1, targetfig.subplotpars.left, closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom, 'bottom', 0, 1, targetfig.subplotpars.bottom, closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1, targetfig.subplotpars.right, closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1, targetfig.subplotpars.top, closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace', 0, 1, targetfig.subplotpars.wspace, closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace', 0, 1, targetfig.subplotpars.hspace, closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon: self.targetfig.canvas.draw()
class Cursor:
"""
A horizontal and vertical line span the axes that and move with
the pointer. You can turn off the hline or vline spectively with
the attributes
horizOn =True|False: controls visibility of the horizontal line
vertOn =True|False: controls visibility of the horizontal line
And the visibility of the cursor itself with visible attribute
"""
def __init__(self, ax, useblit=False, **lineprops):
"""
Add a cursor to ax. If useblit=True, use the backend
dependent blitting features for faster updates (GTKAgg only
now). lineprops is a dictionary of line properties. See
examples/widgets/cursor.py.
"""
self.ax = ax
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
self.visible = True
self.horizOn = True
self.vertOn = True
self.useblit = useblit
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
'on mouse motion draw the cursor if visible'
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible: return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
class MultiCursor:
"""
Provide a vertical line cursor shared between multiple axes
from matplotlib.widgets import MultiCursor
from pylab import figure, show, nx
t = nx.arange(0.0, 2.0, 0.01)
s1 = nx.sin(2*nx.pi*t)
s2 = nx.sin(4*nx.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)
show()
"""
def __init__(self, canvas, axes, useblit=True, **lineprops):
self.canvas = canvas
self.axes = axes
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5*(xmin+xmax)
self.lines = [ax.axvline(xmid, visible=False, **lineprops) for ax in axes]
self.visible = True
self.useblit = useblit
self.background = None
self.needclear = False
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox)
for line in self.lines: line.set_visible(False)
def onmove(self, event):
if event.inaxes is None: return
if not self.canvas.widgetlock.available(self): return
self.needclear = True
if not self.visible: return
for line in self.lines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for ax, line in zip(self.axes, self.lines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector:
"""
Select a min/max range of the x or y axes for a matplotlib Axes
Example usage:
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
onmove_callback is an optional callback that will be called on mouse move
with the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None):
"""
Create a span selector in ax. When a selection is made, clear
the span and call onselect with
onselect(vmin, vmax)
and clear the span.
direction must be 'horizontal' or 'vertical'
If minspan is not None, ignore events smaller than minspan
The span rect is drawn with rectprops; default
rectprops = dict(facecolor='red', alpha=0.5)
set the visible attribute to False if you want to turn off
the functionality of the span selector
"""
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.ax = None
self.canvas = None
self.visible = True
self.cids=[]
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.useblit = useblit
self.minspan = minspan
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
self.new_axes(ax)
def new_axes(self,ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
self.canvas = ax.figure.canvas
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
self.cids.append(self.canvas.mpl_connect('button_press_event', self.press))
self.cids.append(self.canvas.mpl_connect('button_release_event', self.release))
self.cids.append(self.canvas.mpl_connect('draw_event', self.update_background))
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData, self.ax.transAxes)
w,h = 0,1
else:
trans = blended_transform_factory(self.ax.transAxes, self.ax.transData)
w,h = 1,0
self.rect = Rectangle( (0,0), w, h,
transform=trans,
visible=False,
**self.rectprops
)
if not self.useblit: self.ax.add_patch(self.rect)
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
return event.inaxes!=self.ax or not self.visible or event.button !=1
def press(self, event):
'on button press event'
if self.ignore(event): return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
'on button release event'
if self.pressv is None or (self.ignore(event) and not self.buttonDown): return
self.buttonDown = False
self.rect.set_visible(False)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span<self.minspan: return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressv is None or self.ignore(event): return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv>maxv: minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv-minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv-minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
# For backwards compatibility only!
class HorizontalSpanSelector(SpanSelector):
def __init__(self, ax, onselect, **kwargs):
import warnings
warnings.warn('Use SpanSelector instead!', DeprecationWarning)
SpanSelector.__init__(self, ax, onselect, 'horizontal', **kwargs)
class RectangleSelector:
"""
Select a min/max range of the x axes for a matplotlib Axes
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
fig = figure
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data'):
"""
Create a selector in ax. When a selection is made, clear
the span and call onselect with
onselect(pos_1, pos_2)
and clear the drawn box/line. There pos_i are arrays of length 2
containing the x- and y-coordinate.
If minspanx is not None then events smaller than minspanx
in x direction are ignored(it's the same for y).
The rect is drawn with rectprops; default
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with lineprops; default
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
Use type if you want the mouse to draw a line, a box or nothing
between click and actual position ny setting
drawtype = 'line', drawtype='box' or drawtype = 'none'.
spancoords is one of 'data' or 'pixels'. If 'data', minspanx
and minspanx will be interpreted in the same coordinates as
the x and ya axis, if 'pixels', they are in pixels
"""
self.ax = ax
self.visible = True
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0,0), 0, 1,visible=False,**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0,0],[0,0],visible=False,**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit
self.minspanx = minspanx
self.minspany = minspany
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
# If RectangleSelector is not active :
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress == None:
return event.inaxes!= self.ax
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes!=self.ax or
event.button != self.eventpress.button)
def press(self, event):
'on button press event'
# Is the correct button pressed within the correct axes?
if self.ignore(event): return
# make the drawed box/line visible get the click-coordinates,
# button, ...
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
'on button release event'
if self.eventpress is None or self.ignore(event): return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords=='data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line get values in the right
# order
elif self.spancoords=='pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin>xmax: xmin, xmax = xmax, xmin
if ymin>ymax: ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx<self.minspanx
yproblems = self.minspany is not None and spany<self.minspany
if (self.drawtype=='box') and (xproblems or yproblems):
"""Box to small""" # check if drawed distance (if it exists) is
return # not to small in neither x nor y-direction
if (self.drawtype=='line') and (xproblems and yproblems):
"""Line to small""" # check if drawed distance (if it exists) is
return # not to small in neither x nor y-direction
self.onselect(self.eventpress, self.eventrelease)
# call desired function
self.eventpress = None # reset the variables to their
self.eventrelease = None # inital values
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event if box/line is wanted'
if self.eventpress is None or self.ignore(event): return
x,y = event.xdata, event.ydata # actual position (with
# (button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx>maxx: minx, maxx = maxx, minx # get them in the right order
if miny>maxy: miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx-minx) # set width and height of box
self.to_draw.set_height(maxy-miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
""" Use this to activate / deactivate the RectangleSelector
from your program with an boolean variable 'active'.
"""
self.active = active
def get_active(self):
""" to get status of active mode (boolean variable)"""
return self.active
class Lasso(Widget):
def __init__(self, ax, xy, callback=None, useblit=True):
self.axes = ax
self.figure = ax.figure
self.canvas = self.figure.canvas
self.useblit = useblit
if useblit:
self.background = self.canvas.copy_from_bbox(self.axes.bbox)
x, y = xy
self.verts = [(x,y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.axes.add_line(self.line)
self.callback = callback
self.cids = []
self.cids.append(self.canvas.mpl_connect('button_release_event', self.onrelease))
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
def onrelease(self, event):
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts)>2:
self.callback(self.verts)
self.axes.lines.remove(self.line)
self.verts = None
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def onmove(self, event):
if self.verts is None: return
if event.inaxes != self.axes: return
if event.button!=1: return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.axes.draw_artist(self.line)
self.canvas.blit(self.axes.bbox)
else:
self.canvas.draw_idle()
| gpl-3.0 |
cython-testbed/pandas | pandas/tests/scalar/timestamp/test_arithmetic.py | 5 | 2572 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import pytest
import numpy as np
from pandas.compat import long
from pandas.tseries import offsets
from pandas import Timestamp, Timedelta
class TestTimestampArithmetic(object):
def test_overflow_offset(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
# ends up multiplying really large numbers which overflow
stamp = Timestamp('2017-01-13 00:00:00', freq='D')
offset = 20169940 * offsets.Day(1)
with pytest.raises(OverflowError):
stamp + offset
with pytest.raises(OverflowError):
offset + stamp
with pytest.raises(OverflowError):
stamp - offset
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
assert result.nanosecond == val.nanosecond
def test_timestamp_sub_datetime(self):
dt = datetime(2013, 10, 12)
ts = Timestamp(datetime(2013, 10, 13))
assert (ts - dt).days == 1
assert (dt - ts).days == -1
def test_addition_subtraction_types(self):
# Assert on the types resulting from Timestamp +/- various date/time
# objects
dt = datetime(2014, 3, 4)
td = timedelta(seconds=1)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
ts = Timestamp(dt, freq='D')
assert type(ts + 1) == Timestamp
assert type(ts - 1) == Timestamp
# Timestamp + datetime not supported, though subtraction is supported
# and yields timedelta more tests in tseries/base/tests/test_base.py
assert type(ts - dt) == Timedelta
assert type(ts + td) == Timestamp
assert type(ts - td) == Timestamp
# Timestamp +/- datetime64 not supported, so not tested (could possibly
# assert error raised?)
td64 = np.timedelta64(1, 'D')
assert type(ts + td64) == Timestamp
assert type(ts - td64) == Timestamp
def test_addition_subtraction_preserve_frequency(self):
ts = Timestamp('2014-03-05', freq='D')
td = timedelta(days=1)
original_freq = ts.freq
assert (ts + 1).freq == original_freq
assert (ts - 1).freq == original_freq
assert (ts + td).freq == original_freq
assert (ts - td).freq == original_freq
td64 = np.timedelta64(1, 'D')
assert (ts + td64).freq == original_freq
assert (ts - td64).freq == original_freq
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/user_interfaces/embedding_in_gtk3.py | 4 | 1137 | #!/usr/bin/env python
"""
demonstrate adding a FigureCanvasGTK/GTKAgg widget to a gtk.ScrolledWindow
"""
import gtk
from matplotlib.figure import Figure
from numpy import arange, sin, pi
# uncomment to select /GTK/GTKAgg/GTKCairo
#from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
#from matplotlib.backends.backend_gtkcairo import FigureCanvasGTKCairo as FigureCanvas
win = gtk.Window()
win.connect("destroy", lambda x: gtk.main_quit())
win.set_default_size(400,300)
win.set_title("Embedding in GTK")
f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
a.plot(t,s)
sw = gtk.ScrolledWindow()
win.add (sw)
# A scrolled window border goes outside the scrollbars and viewport
sw.set_border_width (10)
# policy: ALWAYS, AUTOMATIC, NEVER
sw.set_policy (hscrollbar_policy=gtk.POLICY_AUTOMATIC,
vscrollbar_policy=gtk.POLICY_ALWAYS)
canvas = FigureCanvas(f) # a gtk.DrawingArea
canvas.set_size_request(800,600)
sw.add_with_viewport (canvas)
win.show_all()
gtk.main()
| gpl-2.0 |
kaichogami/scikit-learn | examples/applications/plot_prediction_latency.py | 85 | 11395 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
    """Draw a boxplot figure of per-instance prediction latencies.

    Parameters
    ----------
    runtimes : list of ``np.array`` of latencies in micro-seconds,
        one array per configured estimator
    pred_type : 'bulk' or 'atomic' (used in the plot title)
    configuration : benchmark configuration dict (estimators + n_features)
    """
    fig, ax1 = plt.subplots(figsize=(10, 6))
    bp = plt.boxplot(runtimes)

    # X tick labels: estimator name plus its measured "complexity".
    tick_labels = ['%s\n(%d %s)' % (conf['name'],
                                    conf['complexity_computer'](
                                        conf['instance']),
                                    conf['complexity_label'])
                   for conf in configuration['estimators']]
    plt.setp(ax1, xticklabels=tick_labels)
    plt.setp(bp['boxes'], color='black')
    plt.setp(bp['whiskers'], color='black')
    plt.setp(bp['fliers'], color='red', marker='+')

    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
        pred_type.capitalize(),
        configuration['n_features']))
    ax1.set_ylabel('Prediction Time (us)')
    plt.show()
def benchmark(configuration):
    """Run the whole latency benchmark and plot atomic and bulk results."""
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])

    stats = {}
    for conf in configuration['estimators']:
        print("Benchmarking", conf['instance'])
        conf['instance'].fit(X_train, y_train)
        gc.collect()
        atomic, bulk = benchmark_estimator(conf['instance'], X_test)
        stats[conf['name']] = {'atomic': atomic, 'bulk': bulk}

    cls_names = [conf['name'] for conf in configuration['estimators']]
    # One boxplot figure per prediction mode, latencies in micro-seconds.
    for mode, label in (('atomic', 'atomic'),
                        ('bulk', 'bulk (%d)' % configuration['n_test'])):
        runtimes = [1e6 * stats[name][mode] for name in cls_names]
        boxplot_runtimes(runtimes, label, configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
    """Estimate how feature-space dimensionality affects prediction time.

    Parameters
    ----------
    estimators : dict mapping name (str) -> estimator to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
    n_features : list of feature-space dimensionalities to test (int)
    percentile : percentile at which to measure the speed (int [0-100])

    Returns
    -------
    percentiles : dict(estimator_name,
                       dict(n_features, percentile_perf_in_us))
    """
    percentiles = defaultdict(defaultdict)
    for n in n_features:
        print("benchmarking with %d features" % n)
        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
        for name, estimator in estimators.items():
            estimator.fit(X_train, y_train)
            gc.collect()
            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
            # Store the chosen percentile of per-instance latency in us.
            percentiles[name][n] = 1e6 * scoreatpercentile(runtimes,
                                                           percentile)
    return percentiles
def plot_n_features_influence(percentiles, percentile):
    """Plot the latency-vs-#features curves collected by n_feature_influence."""
    fig, ax1 = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    for i, cls_name in enumerate(percentiles.keys()):
        dims = np.array(sorted(percentiles[cls_name].keys()))
        latencies = np.array([percentiles[cls_name][d] for d in dims])
        plt.plot(dims, latencies, color=colors[i])
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Evolution of Prediction Time with #Features')
    ax1.set_xlabel('#Features')
    ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
    plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
    """Count single-instance predictions per second for each estimator."""
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])
    throughputs = dict()
    one_instance = X_test[[0]]  # keep 2-D shape expected by predict()
    for conf in configuration['estimators']:
        model = conf['instance']
        model.fit(X_train, y_train)
        started = time.time()
        n_predictions = 0
        # Hammer predict() on a single row for the whole measurement window.
        while time.time() - started < duration_secs:
            model.predict(one_instance)
            n_predictions += 1
        throughputs[conf['name']] = n_predictions / duration_secs
    return throughputs
def plot_benchmark_throughput(throughputs, configuration):
    """Bar chart of the throughputs measured by benchmark_throughputs."""
    fig, ax = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    tick_labels = ['%s\n(%d %s)' % (conf['name'],
                                    conf['complexity_computer'](
                                        conf['instance']),
                                    conf['complexity_label'])
                   for conf in configuration['estimators']]
    values = [throughputs[conf['name']]
              for conf in configuration['estimators']]
    plt.bar(range(len(throughputs)), values, width=0.5, color=colors)
    ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
    ax.set_xticklabels(tick_labels, fontsize=10)
    # Leave 20% headroom above the tallest bar.
    ax.set_ylim((0, max(values) * 1.2))
    ax.set_ylabel('Throughput (predictions/sec)')
    ax.set_title('Prediction Throughput for different estimators (%d '
                 'features)' % configuration['n_features'])
    plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
    'n_train': int(1e3),
    'n_test': int(1e2),
    'n_features': int(1e2),
    # Each entry pairs an estimator with a callable that measures its
    # "complexity" after fitting (used only to label the plots).
    'estimators': [
        {'name': 'Linear Model',
         'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
                                  l1_ratio=0.25, fit_intercept=True),
         'complexity_label': 'non-zero coefficients',
         'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
        {'name': 'RandomForest',
         'instance': RandomForestRegressor(),
         'complexity_label': 'estimators',
         'complexity_computer': lambda clf: clf.n_estimators},
        {'name': 'SVR',
         'instance': SVR(kernel='rbf'),
         'complexity_label': 'support vectors',
         'complexity_computer': lambda clf: len(clf.support_vectors_)},
    ]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
                                  configuration['n_train'],
                                  configuration['n_test'],
                                  [100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
# report total example runtime
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
rs2/pandas | pandas/tests/window/test_numba.py | 1 | 2942 | import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import Series, option_context
import pandas._testing as tm
from pandas.core.util.numba_ import NUMBA_FUNC_CACHE
@td.skip_if_no("numba", "0.46.0")
@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestApply:
    # Tests for Series.rolling(...).apply with engine="numba".

    @pytest.mark.parametrize("jit", [True, False])
    def test_numba_vs_cython(self, jit, nogil, parallel, nopython, center):
        # The numba engine must produce the same result as the cython
        # engine, both for a plain Python callable and for a function the
        # caller has already jitted themselves.
        def f(x, *args):
            arg_sum = 0
            for arg in args:
                arg_sum += arg
            return np.mean(x) + arg_sum

        if jit:
            import numba

            f = numba.jit(f)

        # nogil/parallel/nopython come from pytest fixtures.
        engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
        args = (2,)

        s = Series(range(10))
        result = s.rolling(2, center=center).apply(
            f, args=args, engine="numba", engine_kwargs=engine_kwargs, raw=True
        )
        expected = s.rolling(2, center=center).apply(
            f, engine="cython", args=args, raw=True
        )
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("jit", [True, False])
    def test_cache(self, jit, nogil, parallel, nopython):
        # Test that the functions are cached correctly if we switch functions
        def func_1(x):
            return np.mean(x) + 4

        def func_2(x):
            return np.std(x) * 5

        if jit:
            import numba

            func_1 = numba.jit(func_1)
            func_2 = numba.jit(func_2)

        engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}

        roll = Series(range(10)).rolling(2)
        result = roll.apply(
            func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
        )
        expected = roll.apply(func_1, engine="cython", raw=True)
        tm.assert_series_equal(result, expected)

        # func_1 should be in the cache now
        assert (func_1, "rolling_apply") in NUMBA_FUNC_CACHE

        # Switching to a different function must not return func_1's result.
        result = roll.apply(
            func_2, engine="numba", engine_kwargs=engine_kwargs, raw=True
        )
        expected = roll.apply(func_2, engine="cython", raw=True)
        tm.assert_series_equal(result, expected)

        # This run should use the cached func_1
        result = roll.apply(
            func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
        )
        expected = roll.apply(func_1, engine="cython", raw=True)
        tm.assert_series_equal(result, expected)
@td.skip_if_no("numba", "0.46.0")
def test_use_global_config():
    """engine=None must honor the compute.use_numba option."""
    def add_two_to_mean(window):
        return np.mean(window) + 2

    series = Series(range(10))
    with option_context("compute.use_numba", True):
        # With the option set, engine=None should dispatch to numba.
        via_option = series.rolling(2).apply(add_two_to_mean, engine=None, raw=True)
    explicit = series.rolling(2).apply(add_two_to_mean, engine="numba", raw=True)
    tm.assert_series_equal(explicit, via_option)
napsternxg/pytorch-practice | chunking_bilstm_crf_char_concat.py | 1 | 7939 |
# coding: utf-8
# In[1]:
import matplotlib
matplotlib.use("Agg")
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pytorch_utils import *
from pytorch_models import *
from utils import load_sequences, conll_classification_report_to_df
from conlleval import main as conll_eval
import re
sns.set_context("poster")
sns.set_style("ticks")
# In[2]:
# Paths to the CoNLL-2000 chunking corpus (one token + chunk tag per line).
TRAIN_CORPUS="data/conll2000/train.txt"
TEST_CORPUS="data/conll2000/test.txt"
# In[3]:
# Load (token, chunk-tag) sequences; the first 100 training sentences are
# held out as a development set.
train_corpus = load_sequences(TRAIN_CORPUS, sep=" ", col_ids=(0, -1))
train_corpus, dev_corpus = train_corpus[100:], train_corpus[:100]
print("Total items in train corpus: %s" % len(train_corpus))
print("Total items in dev corpus: %s" % len(dev_corpus))
test_corpus = load_sequences(TEST_CORPUS, sep=" ", col_ids=(0, -1))
print("Total items in test corpus: %s" % len(test_corpus))
# In[5]:
def create_vocab(data, vocabs, char_vocab, word_idx=0):
    """Populate token-level vocabularies and the character vocabulary.

    Parameters
    ----------
    data : list of sentences, each a list of (token, tag, ...) tuples
    vocabs : list of Vocab objects, one per tuple column to index
    char_vocab : Vocab collecting the characters of the word column
    word_idx : tuple position of the surface word (default 0)
    """
    for sentence in data:
        for token_tags in sentence:
            # Column i of the tuple feeds vocabulary i.
            for vocab, value in zip(vocabs, token_tags):
                vocab.add(value)
            char_vocab.batch_add(token_tags[word_idx])
    summary = ", ".join(
        "{}[{}]".format(vocab.name, vocab.size) for vocab in vocabs
    )
    print("Created vocabs: %s, chars[%s]" % (summary, char_vocab.size))
# In[6]:
# Build vocabularies: lowercased words, raw characters, and chunk tags.
word_vocab = Vocab("words", UNK="UNK", lower=True)
char_vocab = Vocab("chars", UNK="<U>", lower=False)
chunk_vocab = Vocab("chunk_tags", lower=False)
# Index all splits so test-time tokens are never out-of-vocabulary.
create_vocab(train_corpus+dev_corpus+test_corpus, [word_vocab, chunk_vocab], char_vocab)
# In[7]:
def data2tensors(data, vocabs, char_vocab, word_idx=0, column_ids=(0, -1)):
    """Convert token/tag sequences into per-sentence index lists.

    Parameters
    ----------
    data : list of sentences, each a list of (token, tag, ...) tuples
    vocabs : list of Vocab objects; ``column_ids`` selects which to use
    char_vocab : Vocab used to index the characters of the word column
    word_idx : tuple position of the surface word (default 0)
    column_ids : indices into ``vocabs`` to vectorize (default (0, -1))

    Returns
    -------
    list with one entry per sentence:
    ``[word_ids, tag_ids, ..., char_id_lists]`` where the last element
    holds, per token, the list of its character indices.
    """
    vocabs = [vocabs[idx] for idx in column_ids]
    # NOTE(review): tuple columns are read at positions 0..n_vocabs-1, not
    # at column_ids; this matches the default (0, -1) on two-column data —
    # verify before using other column_ids.
    n_vocabs = len(vocabs)
    tensors = []
    for sent in data:
        sent_vecs = [[] for _ in range(n_vocabs + 1)]  # last slot: char ids
        for token_tags in sent:
            # Column 0 is the word; lowercase it before lookup.
            sent_vecs[0].append(vocabs[0].getidx(token_tags[0].lower()))
            for vocab_id in range(1, n_vocabs):
                sent_vecs[vocab_id].append(
                    vocabs[vocab_id].getidx(token_tags[vocab_id])
                )
            sent_vecs[-1].append(
                [char_vocab.getidx(c) for c in token_tags[word_idx]]
            )
        tensors.append(sent_vecs)
    return tensors
# In[8]:
# Vectorize every split: word ids, chunk-tag ids, and per-token char ids.
train_tensors = data2tensors(train_corpus, [word_vocab, chunk_vocab], char_vocab)
dev_tensors = data2tensors(dev_corpus, [word_vocab, chunk_vocab], char_vocab)
test_tensors = data2tensors(test_corpus, [word_vocab, chunk_vocab], char_vocab)
print("Train: {}, Dev: {}, Test: {}".format(
    len(train_tensors),
    len(dev_tensors),
    len(test_tensors),
))
# In[9]:
# Pre-trained 100-d GloVe vectors; cached to .npy after the first load.
embedding_file="/home/napsternxg/datadrive/Downloads/Glove/glove.6B.100d.txt"
cache_file="conll2000.glove.100.npy"
ndims=100
pretrained_embeddings = load_word_vectors(embedding_file, ndims, word_vocab, cache_file)
# In[10]:
def plot_losses(train_losses, eval_losses=None, plot_std=False, ax=None):
    """Plot mean loss per epoch, optionally with a standard-deviation band.

    Parameters
    ----------
    train_losses : list of (mean, std) pairs, one per epoch
    eval_losses : optional list of (mean, std) pairs; skipped when None
    plot_std : if True, shade mean +/- one standard deviation
    ax : matplotlib axes to draw on (defaults to the current axes)
    """
    if ax is None:
        ax = plt.gca()
    for losses, color, label in zip(
        [train_losses, eval_losses],
        ["0.5", "r"],
        ["Train", "Eval"],
    ):
        if losses is None:
            # eval_losses defaults to None; previously this crashed on
            # zip(*None) whenever the default was used.
            continue
        mean_loss, std_loss = zip(*losses)
        mean_loss = np.array(mean_loss)
        std_loss = np.array(std_loss)
        ax.plot(
            mean_loss, color=color, label=label,
            linestyle="-",
        )
        if plot_std:
            ax.fill_between(
                np.arange(mean_loss.shape[0]),
                mean_loss-std_loss,
                mean_loss+std_loss,
                color=color,
                alpha=0.3
            )
    ax.set_xlabel("Epochs")
    ax.set_ylabel("Mean Loss ($\pm$ S.D.)")
def print_predictions(corpus, predictions, filename, label_vocab):
    """Write token / gold / predicted tag triples in CoNLL format.

    Each token produces one tab-separated line; sequences are separated
    by a blank line so conlleval can score the file directly.
    """
    with open(filename, "w+") as out:
        for sequence, pred_ids in zip(corpus, predictions):
            for (token, gold_label), pred_id in zip(sequence, pred_ids):
                predicted = label_vocab.idx2item[pred_id]
                out.write("%s\t%s\t%s\n" % (token, gold_label, predicted))
            out.write("\n")  # blank line terminates each sequence
# In[11]:
# ## Class based
# In[19]:
class BiLSTMTaggerWordCRFModel(ModelWrapper):
    """ModelWrapper around a BiLSTM-CRF tagger with word+char embeddings.

    Converts raw (words, tags, chars) instances into model inputs, moves
    them to the GPU when requested, and exposes the per-instance CRF loss
    and Viterbi-decoded predictions.
    """

    def __init__(self, model,
                 loss_function,
                 use_cuda=False):
        self.model = model
        # NOTE(review): `loss_function` is accepted but deliberately
        # discarded — the CRF model computes its own loss in model.loss().
        self.loss_function = None
        self.use_cuda = use_cuda
        if self.use_cuda:
            #[k.cuda() for k in self.model.modules()]
            self.model.cuda()

    def _process_instance_tensors(self, instance_tensors, volatile=False):
        """Turn one (words, tags, chars) instance into model-ready tensors."""
        X, Y, X_char = instance_tensors
        # `volatile` is obsolete in modern PyTorch (inference should run
        # under torch.no_grad()); it is kept and forwarded for
        # compatibility with older torch versions this script targets.
        X = Variable(torch.LongTensor([X]), requires_grad=False, volatile=volatile)
        Y = torch.LongTensor(Y)
        X_char = charseq2varlist(X_char, volatile=volatile)
        return X, X_char, Y

    def get_instance_loss(self, instance_tensors, zero_grad=True):
        """Return the CRF loss for one instance, optionally zeroing grads first."""
        if zero_grad:
            ## Clear gradients before every update else memory runs out
            self.model.zero_grad()
        X, X_char, Y = instance_tensors
        if self.use_cuda:
            # `.cuda(async=True)` became a SyntaxError once `async` turned
            # into a Python keyword (3.7+); `non_blocking` is the
            # documented rename of that parameter.
            X = X.cuda(non_blocking=True)
            Y = Y.cuda(non_blocking=True)
            X_char = [t.cuda(non_blocking=True) for t in X_char]
        #print(X.get_device(), [t.get_device() for t in X_char])
        return self.model.loss(X, X_char, Y)

    def predict(self, instance_tensors):
        """Viterbi-decode the best tag sequence for one instance."""
        X, X_char, Y = self._process_instance_tensors(instance_tensors, volatile=True)
        if self.use_cuda:
            X = X.cuda(non_blocking=True)
            Y = Y.cuda(non_blocking=True)
            X_char = [t.cuda(non_blocking=True) for t in X_char]
        emissions = self.model.forward(X, X_char)
        # crf.forward returns (score, best_path); only the path is needed.
        return self.model.crf.forward(emissions)[1]
# Hyper-parameters for the BiLSTM-CRF chunker.
use_cuda=True
hidden_size=128
batch_size=64
char_emb_size=50
output_channels=25
kernel_sizes=[2, 3]
word_emb_size=100
# 150 = word embedding (100) + concatenated char conv outputs
# (2 kernel sizes x 25 channels) — presumably; confirm against
# WordCharEmbedding's concat behavior.
n_embed=150 # Get this using char embedding and word embed
char_embed_kwargs=dict(
    vocab_size=char_vocab.size,
    embedding_size=char_emb_size,
    out_channels=output_channels,
    kernel_sizes=kernel_sizes
)
word_char_embedding = WordCharEmbedding(
    word_vocab.size, word_emb_size,
    char_embed_kwargs, dropout=0, concat=True)
# Assign glove embeddings (frozen: fix_embedding=True)
assign_embeddings(word_char_embedding.word_embeddings, pretrained_embeddings, fix_embedding=True)
model_wrapper = BiLSTMTaggerWordCRFModel(
    LSTMTaggerWordCharCRF(word_char_embedding, n_embed, hidden_size, chunk_vocab.size),
    None, use_cuda=use_cuda)
# In[33]:
model_prefix="BiLSTMCharConcatCRF_CONLL2000"
n_epochs=50
# Train with Adam; per-epoch losses are returned for plotting below.
training_history = training_wrapper(
    model_wrapper, train_tensors,
    eval_tensors=dev_tensors,
    optimizer=optim.Adam,
    optimizer_kwargs={
        #"lr": 0.01,
        "weight_decay": 0
    },
    n_epochs=n_epochs,
    batch_size=batch_size,
    use_cuda=use_cuda,
    log_file="{}.log".format(model_prefix)
)
model_wrapper.save("{}.pth".format(model_prefix))
# In[34]:
# Plot train/eval loss curves and save them alongside the model.
fig, ax = plt.subplots(1,1)
plot_losses(training_history["training_loss"],
            training_history["evaluation_loss"],
            plot_std=True,
            ax=ax)
ax.legend()
sns.despine(offset=5)
plt.savefig("{}.pdf".format(model_prefix))
# Decode every split, dump CoNLL files, and score them with conlleval.
for title, tensors, corpus in zip(
    ["train", "dev", "test"],
    [train_tensors, dev_tensors, test_tensors],
    [train_corpus, dev_corpus, test_corpus],
):
    predictions = model_wrapper.predict_batch(tensors, title=title)
    print_predictions(corpus, predictions, "%s.chunking.conll" % title, chunk_vocab)
    conll_eval(["conlleval", "%s.chunking.conll" % title])
| apache-2.0 |
miloharper/neural-network-animation | matplotlib/axes/_axes.py | 10 | 260820 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import reduce, xrange, zip, zip_longest
import math
import warnings
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.cbook as cbook
from matplotlib.cbook import _string_to_bool, mplDeprecation
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as _ # <-registers a date unit converter
from matplotlib import docstring
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.markers as mmarkers
import matplotlib.mlab as mlab
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.stackplot as mstack
import matplotlib.streamplot as mstream
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.tri as mtri
import matplotlib.transforms as mtrans
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
from matplotlib.axes._base import _AxesBase
from matplotlib.axes._base import _process_plot_format
iterable = cbook.iterable
is_string_like = cbook.is_string_like
is_sequence_of_strings = cbook.is_sequence_of_strings
# The axes module contains all the wrappers to plotting functions.
# All the other methods should go in the _AxesBase class.
class Axes(_AxesBase):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
### Labelling, legend and texts
def get_title(self, loc="center"):
"""Get an axes title.
Get one of the three available axes titles. The available titles
are positioned above the axes in the center, flush with the left
edge, and flush with the right edge.
Parameters
----------
loc : {'center', 'left', 'right'}, str, optional
Which title to get, defaults to 'center'
Returns
-------
title: str
The title text string.
"""
try:
title = {'left': self._left_title,
'center': self.title,
'right': self._right_title}[loc.lower()]
except KeyError:
raise ValueError("'%s' is not a valid location" % loc)
return title.get_text()
@docstring.dedent_interpd
def set_title(self, label, fontdict=None, loc="center", **kwargs):
"""
Set a title for the axes.
Set one of the three available axes titles. The available titles
are positioned above the axes in the center, flush with the left
edge, and flush with the right edge.
Parameters
----------
label : str
Text to use for the title
fontdict : dict
A dictionary controlling the appearance of the title text,
the default `fontdict` is::
{'fontsize': rcParams['axes.titlesize'],
'fontweight' : rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
loc : {'center', 'left', 'right'}, str, optional
Which title to set, defaults to 'center'
Returns
-------
text : :class:`~matplotlib.text.Text`
The matplotlib text instance representing the title
Other parameters
----------------
kwargs : text properties
Other keyword arguments are text properties, see
:class:`~matplotlib.text.Text` for a list of valid text
properties.
"""
try:
title = {'left': self._left_title,
'center': self.title,
'right': self._right_title}[loc.lower()]
except KeyError:
raise ValueError("'%s' is not a valid location" % loc)
default = {
'fontsize': rcParams['axes.titlesize'],
'fontweight': rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc.lower()}
title.set_text(label)
title.update(default)
if fontdict is not None:
title.update(fontdict)
title.update(kwargs)
return title
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
"""
Set the label for the xaxis.
Parameters
----------
xlabel : string
x label
labelpad : scalar, optional, default: None
spacing in points between the label and the x-axis
Other parameters
----------------
kwargs : `~matplotlib.text.Text` properties
See also
--------
text : for information on how override and the optional args work
"""
if labelpad is not None:
self.xaxis.labelpad = labelpad
return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
"""
Set the label for the yaxis
Parameters
----------
ylabel : string
y label
labelpad : scalar, optional, default: None
spacing in points between the label and the x-axis
Other parameters
----------------
kwargs : `~matplotlib.text.Text` properties
See also
--------
text : for information on how override and the optional args work
"""
if labelpad is not None:
self.yaxis.labelpad = labelpad
return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
def _get_legend_handles(self, legend_handler_map=None):
"""
Return a generator of artists that can be used as handles in
a legend.
"""
handles_original = (self.lines + self.patches +
self.collections + self.containers)
handler_map = mlegend.Legend.get_default_handler_map()
if legend_handler_map is not None:
handler_map = handler_map.copy()
handler_map.update(legend_handler_map)
has_handler = mlegend.Legend.get_legend_handler
for handle in handles_original:
label = handle.get_label()
if label != '_nolegend_' and has_handler(handler_map, handle):
yield handle
def get_legend_handles_labels(self, legend_handler_map=None):
"""
Return handles and labels for legend
``ax.legend()`` is equivalent to ::
h, l = ax.get_legend_handles_labels()
ax.legend(h, l)
"""
handles = []
labels = []
for handle in self._get_legend_handles(legend_handler_map):
label = handle.get_label()
if label and not label.startswith('_'):
handles.append(handle)
labels.append(label)
return handles, labels
def legend(self, *args, **kwargs):
"""
Places a legend on the axes.
To make a legend for lines which already exist on the axes
(via plot for instance), simply call this function with an iterable
of strings, one for each legend item. For example::
ax.plot([1, 2, 3])
ax.legend(['A simple line'])
However, in order to keep the "label" and the legend element
instance together, it is preferable to specify the label either at
artist creation, or by calling the
:meth:`~matplotlib.artist.Artist.set_label` method on the artist::
line, = ax.plot([1, 2, 3], label='Inline label')
# Overwrite the label by calling the method.
line.set_label('Label via method')
ax.legend()
Specific lines can be excluded from the automatic legend element
selection by defining a label starting with an underscore.
This is default for all artists, so calling :meth:`legend` without
any arguments and without setting the labels manually will result in
no legend being drawn.
For full control of which artists have a legend entry, it is possible
to pass an iterable of legend artists followed by an iterable of
legend labels respectively::
legend((line1, line2, line3), ('label1', 'label2', 'label3'))
Parameters
----------
loc : int or string or pair of floats, default: 0
The location of the legend. Possible codes are:
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
Alternatively can be a 2-tuple giving ``x, y`` of the lower-left
corner of the legend in axes coordinates (in which case
``bbox_to_anchor`` will be ignored).
bbox_to_anchor : :class:`matplotlib.transforms.BboxBase` instance \
or tuple of floats
Specify any arbitrary location for the legend in `bbox_transform`
coordinates (default Axes coordinates).
For example, to put the legend's upper right hand corner in the
center of the axes the following keywords can be used::
loc='upper right', bbox_to_anchor=(0.5, 0.5)
ncol : integer
The number of columns that the legend has. Default is 1.
prop : None or :class:`matplotlib.font_manager.FontProperties` or dict
The font properties of the legend. If None (default), the current
:data:`matplotlib.rcParams` will be used.
fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium',\
'large', 'x-large', 'xx-large'}
Controls the font size of the legend. If the value is numeric the
size will be the absolute font size in points. String values are
relative to the current default font size. This argument is only
used if `prop` is not specified.
numpoints : None or int
The number of marker points in the legend when creating a legend
entry for a line/:class:`matplotlib.lines.Line2D`.
Default is ``None`` which will take the value from the
``legend.numpoints`` :data:`rcParam<matplotlib.rcParams>`.
scatterpoints : None or int
The number of marker points in the legend when creating a legend
entry for a scatter plot/
:class:`matplotlib.collections.PathCollection`.
Default is ``None`` which will take the value from the
``legend.scatterpoints`` :data:`rcParam<matplotlib.rcParams>`.
scatteryoffsets : iterable of floats
The vertical offset (relative to the font size) for the markers
created for a scatter plot legend entry. 0.0 is at the base the
legend text, and 1.0 is at the top. To draw all markers at the
same height, set to ``[0.5]``. Default ``[0.375, 0.5, 0.3125]``.
markerscale : None or int or float
The relative size of legend markers compared with the originally
drawn ones. Default is ``None`` which will take the value from
the ``legend.markerscale`` :data:`rcParam <matplotlib.rcParams>`.
frameon : None or bool
Control whether a frame should be drawn around the legend.
Default is ``None`` which will take the value from the
``legend.frameon`` :data:`rcParam<matplotlib.rcParams>`.
fancybox : None or bool
Control whether round edges should be enabled around
the :class:`~matplotlib.patches.FancyBboxPatch` which
makes up the legend's background.
Default is ``None`` which will take the value from the
``legend.fancybox`` :data:`rcParam<matplotlib.rcParams>`.
shadow : None or bool
Control whether to draw a shadow behind the legend.
Default is ``None`` which will take the value from the
``legend.shadow`` :data:`rcParam<matplotlib.rcParams>`.
framealpha : None or float
Control the alpha transparency of the legend's frame.
Default is ``None`` which will take the value from the
``legend.framealpha`` :data:`rcParam<matplotlib.rcParams>`.
mode : {"expand", None}
If `mode` is set to ``"expand"`` the legend will be horizontally
expanded to fill the axes area (or `bbox_to_anchor` if defines
the legend's size).
bbox_transform : None or :class:`matplotlib.transforms.Transform`
The transform for the bounding box (`bbox_to_anchor`). For a value
of ``None`` (default) the Axes'
:data:`~matplotlib.axes.Axes.transAxes` transform will be used.
title : str or None
The legend's title. Default is no title (``None``).
borderpad : float or None
The fractional whitespace inside the legend border.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.borderpad`` :data:`rcParam<matplotlib.rcParams>`.
labelspacing : float or None
The vertical space between the legend entries.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.labelspacing`` :data:`rcParam<matplotlib.rcParams>`.
handlelength : float or None
The length of the legend handles.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.handlelength`` :data:`rcParam<matplotlib.rcParams>`.
handletextpad : float or None
The pad between the legend handle and text.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.handletextpad`` :data:`rcParam<matplotlib.rcParams>`.
borderaxespad : float or None
The pad between the axes and legend border.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.borderaxespad`` :data:`rcParam<matplotlib.rcParams>`.
columnspacing : float or None
The spacing between columns.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.columnspacing`` :data:`rcParam<matplotlib.rcParams>`.
handler_map : dict or None
The custom dictionary mapping instances or types to a legend
handler. This `handler_map` updates the default handler map
found at :func:`matplotlib.legend.Legend.get_legend_handler_map`.
Notes
-----
Not all kinds of artist are supported by the legend command.
See :ref:`plotting-guide-legend` for details.
Examples
--------
.. plot:: mpl_examples/api/legend_demo.py
"""
handlers = kwargs.get('handler_map', {}) or {}
# Support handles and labels being passed as keywords.
handles = kwargs.pop('handles', None)
labels = kwargs.pop('labels', None)
if handles is not None and labels is None:
labels = [handle.get_label() for handle in handles]
for label, handle in zip(labels[:], handles[:]):
if label.startswith('_'):
warnings.warn('The handle {!r} has a label of {!r} which '
'cannot be automatically added to the '
'legend.'.format(handle, label))
labels.remove(label)
handles.remove(handle)
elif labels is not None and handles is None:
# Get as many handles as there are labels.
handles = [handle for handle, _
in zip(self._get_legend_handles(handlers), labels)]
# No arguments - automatically detect labels and handles.
elif len(args) == 0:
handles, labels = self.get_legend_handles_labels(handlers)
if not handles:
warnings.warn("No labelled objects found. "
"Use label='...' kwarg on individual plots.")
return None
# One argument. User defined labels - automatic handle detection.
elif len(args) == 1:
labels, = args
# Get as many handles as there are labels.
handles = [handle for handle, _
in zip(self._get_legend_handles(handlers), labels)]
# Two arguments. Either:
# * user defined handles and labels
# * user defined labels and location (deprecated)
elif len(args) == 2:
if is_string_like(args[1]) or isinstance(args[1], int):
cbook.warn_deprecated('1.4', 'The "loc" positional argument '
'to legend is deprecated. Please use '
'the "loc" keyword instead.')
labels, loc = args
handles = [handle for handle, _
in zip(self._get_legend_handles(handlers), labels)]
kwargs['loc'] = loc
else:
handles, labels = args
# Three arguments. User defined handles, labels and
# location (deprecated).
elif len(args) == 3:
cbook.warn_deprecated('1.4', 'The "loc" positional argument '
'to legend is deprecated. Please '
'use the "loc" keyword instead.')
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend.')
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
self.legend_._remove_method = lambda h: setattr(self, 'legend_', None)
return self.legend_
def text(self, x, y, s, fontdict=None,
         withdash=False, **kwargs):
    """
    Add the string *s* to the axes at position (*x*, *y*) in data
    coordinates.

    Parameters
    ----------
    x, y : scalars
        Position of the text, in data coordinates by default (pass a
        ``transform`` kwarg, e.g. ``transform=ax.transAxes``, for axes
        coordinates).
    s : string
        The text to draw.
    fontdict : dict, optional, default: None
        Overrides for the default text properties; if None the defaults
        come from the rc parameters.
    withdash : bool, optional, default: False
        If True, create a `~matplotlib.text.TextWithDash` instance
        instead of a plain `~matplotlib.text.Text`.

    Other parameters
    ----------------
    kwargs : `~matplotlib.text.Text` properties, e.g. ``fontsize=12``
        or ``bbox=dict(facecolor='red', alpha=0.5)``.

    Returns
    -------
    The created text artist.
    """
    # TextWithDash is a heavier-weight drop-in replacement for Text;
    # only use it when the caller explicitly asks for a dash.
    text_cls = mtext.TextWithDash if withdash else mtext.Text
    artist = text_cls(x=x, y=y, text=s)
    self._set_artist_props(artist)
    # Defaults are applied first so fontdict and kwargs can override them.
    artist.update({'verticalalignment': 'baseline',
                   'horizontalalignment': 'left',
                   'transform': self.transData,
                   'clip_on': False})
    if fontdict is not None:
        artist.update(fontdict)
    artist.update(kwargs)
    self.texts.append(artist)
    artist._remove_method = lambda h: self.texts.remove(h)
    artist.set_clip_path(self.patch)
    return artist
@docstring.dedent_interpd
def annotate(self, *args, **kwargs):
    """
    Create an annotation: a piece of text referring to a data point.

    Parameters
    ----------
    s : string
        The annotation text.
    xy : (x, y)
        Position of the element to annotate.
    xytext : (x, y), optional, default: None
        Position of the text label *s*.
    xycoords, textcoords : string, optional
        Coordinate systems for *xy* and *xytext* respectively, e.g.
        "data", "figure points", "axes fraction"; see
        `matplotlib.text.Annotation` for the full list.
    arrowprops : dict, optional, default: None
        Arrow properties connecting the label to the point; if the dict
        contains an ``arrowstyle`` key a
        `~matplotlib.patches.FancyArrowPatch` is used.

    Returns
    -------
    a : `~matplotlib.text.Annotation`

    Notes
    -----
    %(Annotation)s
    """
    annotation = mtext.Annotation(*args, **kwargs)
    annotation.set_transform(mtransforms.IdentityTransform())
    self._set_artist_props(annotation)
    # Only clip when the caller explicitly requested it.
    if 'clip_on' in kwargs:
        annotation.set_clip_path(self.patch)
    self.texts.append(annotation)
    annotation._remove_method = lambda h: self.texts.remove(h)
    return annotation
#### Lines and spans
@docstring.dedent_interpd
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
    """
    Add a horizontal line across the axis.

    Parameters
    ----------
    y : scalar, optional, default: 0
        y position in data coordinates of the horizontal line.
    xmin : scalar, optional, default: 0
        Between 0 and 1; 0 is the far left of the plot, 1 the far right.
    xmax : scalar, optional, default: 1
        Between 0 and 1; 0 is the far left of the plot, 1 the far right.

    Returns
    -------
    `~matplotlib.lines.Line2D`

    Notes
    -----
    kwargs are the same as for plot and control the line properties,
    e.g. ``axhline(linewidth=4, color='r')``. Valid kwargs are
    :class:`~matplotlib.lines.Line2D` properties, with the exception
    of 'transform':

    %(Line2D)s

    See also
    --------
    axhspan : for example plot and source code
    """
    if "transform" in kwargs:
        # The line is placed in a blended (axes-x, data-y) transform;
        # a user-supplied transform would be silently wrong.
        raise ValueError(
            "'transform' is not allowed as a kwarg;"
            "axhline generates its own transform.")
    ymin, ymax = self.get_ybound()
    # Strip units so the value can be compared with the raw bounds.
    self._process_unit_info(ydata=y, kwargs=kwargs)
    yy = self.convert_yunits(y)
    # Only rescale y when the new line falls outside the current bounds.
    scaley = (yy > ymax) or (yy < ymin)
    trans = self.get_yaxis_transform(which='grid')
    line = mlines.Line2D([xmin, xmax], [y, y], transform=trans, **kwargs)
    self.add_line(line)
    self.autoscale_view(scalex=False, scaley=scaley)
    return line
@docstring.dedent_interpd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
    """
    Add a vertical line across the axes.

    Parameters
    ----------
    x : scalar, optional, default: 0
        x position in data coordinates of the vertical line.
    ymin : scalar, optional, default: 0
        Should be between 0 and 1, 0 being the bottom of the plot, 1 the
        top of the plot.
    ymax : scalar, optional, default: 1
        Should be between 0 and 1, 0 being the bottom of the plot, 1 the
        top of the plot.

    Returns
    -------
    `~matplotlib.lines.Line2D`

    Examples
    ---------
    * draw a thick red vline at *x* = 0 that spans the yrange::

        >>> axvline(linewidth=4, color='r')

    * draw a default vline at *x* = 1 that spans the yrange::

        >>> axvline(x=1)

    * draw a default vline at *x* = .5 that spans the middle half of
      the yrange::

        >>> axvline(x=.5, ymin=0.25, ymax=0.75)

    Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
    with the exception of 'transform':

    %(Line2D)s

    See also
    --------
    axhspan : for example plot and source code
    """
    if "transform" in kwargs:
        # The line uses a blended (data-x, axes-y) transform;
        # a user-supplied transform would conflict with it.
        raise ValueError(
            "'transform' is not allowed as a kwarg;"
            + "axvline generates its own transform.")
    # ymin/ymax are axes-fraction coordinates; only x carries units.
    xmin, xmax = self.get_xbound()
    # We need to strip away the units for comparison with
    # non-unitized bounds
    self._process_unit_info(xdata=x, kwargs=kwargs)
    xx = self.convert_xunits(x)
    # Only autoscale x when the line lies outside the current x bounds.
    scalex = (xx < xmin) or (xx > xmax)
    trans = self.get_xaxis_transform(which='grid')
    l = mlines.Line2D([x, x], [ymin, ymax], transform=trans, **kwargs)
    self.add_line(l)
    self.autoscale_view(scalex=scalex, scaley=False)
    return l
@docstring.dedent_interpd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
    """
    Add a horizontal span (rectangle) across the axis.

    Draw a horizontal span from *ymin* to *ymax* (data units). With the
    default *xmin* = 0 and *xmax* = 1 the span always covers the full
    xrange regardless of the xlim settings: the horizontal extent is in
    axes coordinates (0=left, 0.5=middle, 1.0=right) while the *y*
    location is in data coordinates.

    Returns a :class:`matplotlib.patches.Polygon` instance.

    Example: a gray rectangle from *y* = 0.25-0.75 spanning the axes::

        >>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)

    Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:

    %(Polygon)s

    .. plot:: mpl_examples/pylab_examples/axhspan_demo.py
    """
    # Blended transform: x in axes fraction, y in data coordinates.
    trans = self.get_yaxis_transform(which='grid')
    # Process the unit information, then strip units for drawing.
    self._process_unit_info([xmin, xmax], [ymin, ymax], kwargs=kwargs)
    xmin, xmax = self.convert_xunits([xmin, xmax])
    ymin, ymax = self.convert_yunits([ymin, ymax])
    corners = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
    span = mpatches.Polygon(corners, **kwargs)
    span.set_transform(trans)
    self.add_patch(span)
    # x extent is in axes coords, so never autoscale x for it.
    self.autoscale_view(scalex=False)
    return span
@docstring.dedent_interpd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
    """
    Add a vertical span (rectangle) across the axes.

    Draw a vertical span from *xmin* to *xmax* (data units). With the
    default *ymin* = 0 and *ymax* = 1 the span always covers the full
    yrange regardless of the ylim settings: the vertical extent is in
    axes coordinates (0=bottom, 0.5=middle, 1.0=top) while the *x*
    location is in data coordinates.

    Returns a :class:`matplotlib.patches.Polygon` instance.

    Example: a green translucent rectangle from x=1.25 to 1.55 spanning
    the yrange of the axes::

        >>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)

    Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:

    %(Polygon)s

    .. seealso::

        :meth:`axhspan`
            for example plot and source code
    """
    # Blended transform: x in data coordinates, y in axes fraction.
    trans = self.get_xaxis_transform(which='grid')
    # Process the unit information, then strip units for drawing.
    self._process_unit_info([xmin, xmax], [ymin, ymax], kwargs=kwargs)
    xmin, xmax = self.convert_xunits([xmin, xmax])
    ymin, ymax = self.convert_yunits([ymin, ymax])
    corners = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
    span = mpatches.Polygon(corners, **kwargs)
    span.set_transform(trans)
    self.add_patch(span)
    # y extent is in axes coords, so never autoscale y for it.
    self.autoscale_view(scaley=False)
    return span
@docstring.dedent
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
           label='', **kwargs):
    """
    Plot horizontal lines at each `y` from `xmin` to `xmax`.

    Parameters
    ----------
    y : scalar or sequence of scalar
        y-indexes where to plot the lines.
    xmin, xmax : scalar or 1D array_like
        Beginning and end of each line; scalars give all lines the
        same length.
    colors : array_like of colors, optional, default: 'k'
    linestyles : ['solid' | 'dashed' | 'dashdot' | 'dotted'], optional
    label : string, optional, default: ''

    Returns
    -------
    lines : `~matplotlib.collections.LineCollection`

    Other parameters
    ----------------
    kwargs : `~matplotlib.collections.LineCollection` properties.

    See also
    --------
    vlines : vertical lines
    """
    # Convert units first: unitized data need not be uniform.
    self._process_unit_info([xmin, xmax], y, kwargs=kwargs)
    y = self.convert_yunits(y)
    xmin = self.convert_xunits(xmin)
    xmax = self.convert_xunits(xmax)
    # Promote scalars to sequences so broadcasting below works.
    if not iterable(y):
        y = [y]
    if not iterable(xmin):
        xmin = [xmin]
    if not iterable(xmax):
        xmax = [xmax]
    y = np.ravel(y)
    xmin = np.resize(xmin, y.shape)
    xmax = np.resize(xmax, y.shape)
    segments = [((x0, yy), (x1, yy))
                for x0, x1, yy in zip(xmin, xmax, y)]
    lines = mcoll.LineCollection(segments, colors=colors,
                                 linestyles=linestyles, label=label)
    self.add_collection(lines, autolim=False)
    lines.update(kwargs)
    if len(y) > 0:
        # Update the data limits manually since autolim=False above.
        minx = min(xmin.min(), xmax.min())
        maxx = max(xmin.max(), xmax.max())
        corners = (minx, y.min()), (maxx, y.max())
        self.update_datalim(corners)
        self.autoscale_view()
    return lines
@docstring.dedent_interpd
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
           label='', **kwargs):
    """
    Plot vertical lines at each `x` from `ymin` to `ymax`.

    Parameters
    ----------
    x : scalar or 1D array_like
        x-indexes where to plot the lines.
    ymin, ymax : scalar or 1D array_like
        Beginning and end of each line; scalars give all lines the
        same length.
    colors : array_like of colors, optional, default: 'k'
    linestyles : ['solid' | 'dashed' | 'dashdot' | 'dotted'], optional
    label : string, optional, default: ''

    Returns
    -------
    lines : `~matplotlib.collections.LineCollection`

    Other parameters
    ----------------
    kwargs : `~matplotlib.collections.LineCollection` properties.

    See also
    --------
    hlines : horizontal lines
    """
    # Convert units first: unitized data need not be uniform.
    self._process_unit_info(xdata=x, ydata=[ymin, ymax], kwargs=kwargs)
    x = self.convert_xunits(x)
    ymin = self.convert_yunits(ymin)
    ymax = self.convert_yunits(ymax)
    # Promote scalars to sequences so broadcasting below works.
    if not iterable(x):
        x = [x]
    if not iterable(ymin):
        ymin = [ymin]
    if not iterable(ymax):
        ymax = [ymax]
    x = np.ravel(x)
    ymin = np.resize(ymin, x.shape)
    ymax = np.resize(ymax, x.shape)
    segments = [((xx, y0), (xx, y1))
                for xx, y0, y1 in zip(x, ymin, ymax)]
    lines = mcoll.LineCollection(segments, colors=colors,
                                 linestyles=linestyles, label=label)
    self.add_collection(lines, autolim=False)
    lines.update(kwargs)
    if len(x) > 0:
        # Update the data limits manually since autolim=False above.
        miny = min(min(ymin), min(ymax))
        maxy = max(max(ymin), max(ymax))
        corners = (min(x), miny), (max(x), maxy)
        self.update_datalim(corners)
        self.autoscale_view()
    return lines
@docstring.dedent_interpd
def eventplot(self, positions, orientation='horizontal', lineoffsets=1,
              linelengths=1, linewidths=None, colors=None,
              linestyles='solid', **kwargs):
    """
    Plot identical parallel lines at specific positions.

    Call signature::

      eventplot(positions, orientation='horizontal', lineoffsets=1,
                linelengths=1, linewidths=None, colors=None,
                linestyles='solid')

    Plot parallel lines at the given positions.  positions should be a
    1D or 2D array-like object, with each row corresponding to a row or
    column of lines.

    This type of plot is commonly used in neuroscience for representing
    neural events, where it is commonly called a spike raster, dot
    raster, or raster plot.  However, it is useful in any situation
    where you wish to show the timing or position of multiple sets of
    discrete events, such as the arrival times of people to a business
    on each day of the month or the date of hurricanes each year of the
    last century.

    *orientation* : [ 'horizontal' | 'vertical' ]
      'horizontal' : the lines will be vertical and arranged in rows
      'vertical' : lines will be horizontal and arranged in columns

    *lineoffsets*, *linelengths*, *linewidths* :
      A float or array-like containing floats.

    *colors*
      must be a sequence of RGBA tuples (e.g., arbitrary color
      strings, etc, not allowed) or a list of such sequences

    *linestyles* :
      [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ] or an array of
      these values

    For linelengths, linewidths, colors, and linestyles, if only a
    single value is given, that value is applied to all lines.  If an
    array-like is given, it must have the same length as positions, and
    each value will be applied to the corresponding row or column in
    positions.

    Returns a list of :class:`matplotlib.collections.EventCollection`
    objects that were added.

    kwargs are :class:`~matplotlib.collections.LineCollection`
    properties:

    %(LineCollection)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/eventplot_demo.py
    """
    self._process_unit_info(xdata=positions,
                            ydata=[lineoffsets, linelengths],
                            kwargs=kwargs)
    # We do the conversion first since not all unitized data is uniform
    positions = self.convert_xunits(positions)
    lineoffsets = self.convert_yunits(lineoffsets)
    linelengths = self.convert_yunits(linelengths)
    # Normalize positions to a list of arrays, one per event set.
    if not iterable(positions):
        positions = [positions]
    elif any(iterable(position) for position in positions):
        positions = [np.asanyarray(position) for position in positions]
    else:
        positions = [np.asanyarray(positions)]
    if len(positions) == 0:
        return []
    # Promote scalar per-line properties to one-element sequences.
    if not iterable(lineoffsets):
        lineoffsets = [lineoffsets]
    if not iterable(linelengths):
        linelengths = [linelengths]
    if not iterable(linewidths):
        linewidths = [linewidths]
    if not iterable(colors):
        colors = [colors]
    if hasattr(linestyles, 'lower') or not iterable(linestyles):
        linestyles = [linestyles]
    lineoffsets = np.asarray(lineoffsets)
    linelengths = np.asarray(linelengths)
    linewidths = np.asarray(linewidths)
    # Empty property sequences fall back to a single default entry.
    if len(lineoffsets) == 0:
        lineoffsets = [None]
    if len(linelengths) == 0:
        linelengths = [None]
    if len(linewidths) == 0:
        # BUG FIX: this branch previously assigned ``lineoffsets = [None]``
        # (twice), leaving empty linewidths un-normalized and clobbering
        # the already-computed line offsets.
        linewidths = [None]
    if len(colors) == 0:
        colors = [None]
    # Broadcast single-valued properties across all event sets.
    if len(lineoffsets) == 1 and len(positions) != 1:
        lineoffsets = np.tile(lineoffsets, len(positions))
        lineoffsets[0] = 0
        lineoffsets = np.cumsum(lineoffsets)
    if len(linelengths) == 1:
        linelengths = np.tile(linelengths, len(positions))
    if len(linewidths) == 1:
        linewidths = np.tile(linewidths, len(positions))
    if len(colors) == 1:
        colors = list(colors)
        colors = colors * len(positions)
    if len(linestyles) == 1:
        linestyles = [linestyles] * len(positions)
    # After broadcasting, each property must match positions 1:1.
    if len(lineoffsets) != len(positions):
        raise ValueError('lineoffsets and positions are unequal sized '
                         'sequences')
    if len(linelengths) != len(positions):
        raise ValueError('linelengths and positions are unequal sized '
                         'sequences')
    if len(linewidths) != len(positions):
        raise ValueError('linewidths and positions are unequal sized '
                         'sequences')
    if len(colors) != len(positions):
        raise ValueError('colors and positions are unequal sized '
                         'sequences')
    if len(linestyles) != len(positions):
        raise ValueError('linestyles and positions are unequal sized '
                         'sequences')
    colls = []
    for position, lineoffset, linelength, linewidth, color, linestyle in \
            zip(positions, lineoffsets, linelengths, linewidths,
                colors, linestyles):
        coll = mcoll.EventCollection(position,
                                     orientation=orientation,
                                     lineoffset=lineoffset,
                                     linelength=linelength,
                                     linewidth=linewidth,
                                     color=color,
                                     linestyle=linestyle)
        self.add_collection(coll, autolim=False)
        coll.update(kwargs)
        colls.append(coll)
    if len(positions) > 0:
        # try to get min/max
        min_max = [(np.min(_p), np.max(_p)) for _p in positions
                   if len(_p) > 0]
        # if we have any non-empty positions, try to autoscale
        if len(min_max) > 0:
            mins, maxes = zip(*min_max)
            minpos = np.min(mins)
            maxpos = np.max(maxes)
            minline = (lineoffsets - linelengths).min()
            maxline = (lineoffsets + linelengths).max()
            if colls[0].is_horizontal():
                corners = (minpos, minline), (maxpos, maxline)
            else:
                corners = (minline, minpos), (maxline, maxpos)
            self.update_datalim(corners)
            self.autoscale_view()
    return colls
#### Basic plotting
@docstring.dedent_interpd
def plot(self, *args, **kwargs):
    """
    Plot lines and/or markers to the :class:`~matplotlib.axes.Axes`.

    *args* is a variable length argument allowing for multiple *x*, *y*
    pairs, each with an optional format string::

      plot(x, y)        # default line style and color
      plot(x, y, 'bo')  # blue circle markers
      plot(y)           # x is the index array 0..N-1
      plot(y, 'r+')     # ditto, with red plusses
      plot(x1, y1, 'g^', x2, y2, 'g-')  # several groups at once

    If *x* and/or *y* is 2-dimensional the corresponding columns are
    plotted.  Each line gets a color from the 'color cycle'
    (axes.color_cycle rcParam) unless one is specified.

    A format string combines a line style or marker character
    (``'-' '--' '-.' ':' '.' ',' 'o' 'v' '^' '<' '>' '1' '2' '3' '4'
    's' 'p' '*' 'h' 'H' '+' 'x' 'D' 'd' '|' '_'``) with a color
    abbreviation (``'b' 'g' 'r' 'c' 'm' 'y' 'k' 'w'``), e.g. ``'bo'``
    for blue circles.  Colors may also be given as full names
    (``'green'``), hex strings (``'#008000'``), RGB(A) tuples (kwargs
    only) or grayscale intensity strings (``'0.8'``).

    The *kwargs* set :class:`~matplotlib.lines.Line2D` properties on
    every line created by the call, e.g.::

      plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)

    %(Line2D)s

    kwargs *scalex* and *scaley*, if defined, are passed on to
    :meth:`~matplotlib.axes.Axes.autoscale_view` to determine whether
    the *x* and *y* axes are autoscaled; the default is *True*.

    Returns the list of lines that were added.
    """
    do_scalex = kwargs.pop('scalex', True)
    do_scaley = kwargs.pop('scaley', True)
    if not self._hold:
        self.cla()
    added = []
    # _get_lines parses the (x, y, fmt) argument groups into Line2D
    # instances; each one is registered with the axes as it arrives.
    for new_line in self._get_lines(*args, **kwargs):
        self.add_line(new_line)
        added.append(new_line)
    self.autoscale_view(scalex=do_scalex, scaley=do_scaley)
    return added
@docstring.dedent_interpd
def plot_date(self, x, y, fmt='o', tz=None, xdate=True, ydate=False,
              **kwargs):
    """
    Plot data where *x* and/or *y* are dates.

    Similar to :func:`~matplotlib.pyplot.plot`, except the *x* or *y*
    (or both) data is considered to be dates, and the axis is labeled
    accordingly.  Dates are floats counting days since 0001-01-01 UTC.

    Keyword arguments:

    *fmt*: string
      The plot format string.

    *tz*: [ *None* | timezone string | :class:`tzinfo` instance]
      The time zone used for labeling dates; *None* means the rc value.

    *xdate*: [ *True* | *False* ]
      If *True*, the *x*-axis will be labeled with dates.

    *ydate*: [ *False* | *True* ]
      If *True*, the *y*-axis will be labeled with dates.

    Note: this call sets the default tick locator/formatter to
    :class:`matplotlib.dates.AutoDateLocator` /
    :class:`matplotlib.dates.AutoDateFormatter` (unless a
    DateLocator/DateFormatter is already installed), so custom date
    tickers should be set *after* calling :meth:`plot_date`.

    Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:

    %(Line2D)s

    .. seealso::

        :mod:`~matplotlib.dates` — :func:`~matplotlib.dates.date2num`,
        :func:`~matplotlib.dates.num2date`,
        :func:`~matplotlib.dates.drange` help create the required
        floating point dates.
    """
    if not self._hold:
        self.cla()
    lines = self.plot(x, y, fmt, **kwargs)
    # Switch the requested axes over to date locators/formatters.
    if xdate:
        self.xaxis_date(tz)
    if ydate:
        self.yaxis_date(tz)
    self.autoscale_view()
    return lines
@docstring.dedent_interpd
def loglog(self, *args, **kwargs):
    """
    Make a plot with log scaling on both the *x* and *y* axis.

    Supports all the keyword arguments of
    :func:`~matplotlib.pyplot.plot` and
    :meth:`matplotlib.axes.Axes.set_xscale` /
    :meth:`matplotlib.axes.Axes.set_yscale`.

    Notable keyword arguments:

    *basex*/*basey*: scalar > 1
      Base of the *x*/*y* logarithm

    *subsx*/*subsy*: [ *None* | sequence ]
      Location of the minor *x*/*y* ticks; *None* means automatic,
      depending on the number of decades; see set_xscale/set_yscale.

    *nonposx*/*nonposy*: ['mask' | 'clip' ]
      Non-positive values in *x* or *y* can be masked as invalid, or
      clipped to a very small positive number.

    The remaining valid kwargs are
    :class:`~matplotlib.lines.Line2D` properties:

    %(Line2D)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/log_demo.py
    """
    if not self._hold:
        self.cla()
    # Pull the scale-related kwargs out before they reach plot().
    xscale_kw = dict(basex=kwargs.pop('basex', 10),
                     subsx=kwargs.pop('subsx', None),
                     nonposx=kwargs.pop('nonposx', 'mask'))
    yscale_kw = dict(basey=kwargs.pop('basey', 10),
                     subsy=kwargs.pop('subsy', None),
                     nonposy=kwargs.pop('nonposy', 'mask'))
    self.set_xscale('log', **xscale_kw)
    self.set_yscale('log', **yscale_kw)
    # Hold has already been honored above; force it on for the plot call.
    saved_hold = self._hold
    self._hold = True
    lines = self.plot(*args, **kwargs)
    self._hold = saved_hold
    return lines
@docstring.dedent_interpd
def semilogx(self, *args, **kwargs):
    """
    Make a plot with log scaling on the *x* axis.

    Supports all the keyword arguments of
    :func:`~matplotlib.pyplot.plot` and
    :meth:`matplotlib.axes.Axes.set_xscale`.

    Notable keyword arguments:

    *basex*: scalar > 1
      Base of the *x* logarithm

    *subsx*: [ *None* | sequence ]
      Location of the minor xticks; *None* means automatic, depending
      on the number of decades; see
      :meth:`~matplotlib.axes.Axes.set_xscale`.

    *nonposx*: [ 'mask' | 'clip' ]
      Non-positive values in *x* can be masked as invalid, or clipped
      to a very small positive number.

    The remaining valid kwargs are
    :class:`~matplotlib.lines.Line2D` properties:

    %(Line2D)s

    .. seealso::

        :meth:`loglog`
            For example code and figure
    """
    if not self._hold:
        self.cla()
    # Pull the scale-related kwargs out before they reach plot().
    xscale_kw = dict(basex=kwargs.pop('basex', 10),
                     subsx=kwargs.pop('subsx', None),
                     nonposx=kwargs.pop('nonposx', 'mask'))
    self.set_xscale('log', **xscale_kw)
    # Hold has already been honored above; force it on for the plot call.
    saved_hold = self._hold
    self._hold = True
    lines = self.plot(*args, **kwargs)
    self._hold = saved_hold
    return lines
@docstring.dedent_interpd
def semilogy(self, *args, **kwargs):
    """
    Make a plot with log scaling on the *y* axis.

    Supports all the keyword arguments of
    :func:`~matplotlib.pylab.plot` and
    :meth:`matplotlib.axes.Axes.set_yscale`.

    Notable keyword arguments:

    *basey*: scalar > 1
      Base of the *y* logarithm

    *subsy*: [ *None* | sequence ]
      Location of the minor yticks; *None* means automatic, depending
      on the number of decades; see
      :meth:`~matplotlib.axes.Axes.set_yscale`.

    *nonposy*: [ 'mask' | 'clip' ]
      Non-positive values in *y* can be masked as invalid, or clipped
      to a very small positive number.

    The remaining valid kwargs are
    :class:`~matplotlib.lines.Line2D` properties:

    %(Line2D)s

    .. seealso::

        :meth:`loglog`
            For example code and figure
    """
    if not self._hold:
        self.cla()
    # Pull the scale-related kwargs out before they reach plot().
    yscale_kw = dict(basey=kwargs.pop('basey', 10),
                     subsy=kwargs.pop('subsy', None),
                     nonposy=kwargs.pop('nonposy', 'mask'))
    self.set_yscale('log', **yscale_kw)
    # Hold has already been honored above; force it on for the plot call.
    saved_hold = self._hold
    self._hold = True
    lines = self.plot(*args, **kwargs)
    self._hold = saved_hold
    return lines
@docstring.dedent_interpd
def acorr(self, x, **kwargs):
    """
    Plot the autocorrelation of `x`.

    Parameters
    ----------
    x : sequence of scalar

    hold : boolean, optional, default: True

    detrend : callable, optional, default: `mlab.detrend_none`
        x is detrended by the `detrend` callable.

    normed : boolean, optional, default: True
        If True, normalize the data by the autocorrelation at the
        0-th lag.

    usevlines : boolean, optional, default: True
        If True, Axes.vlines is used to draw the vertical lines from
        the origin to the acorr; otherwise Axes.plot is used.

    maxlags : integer, optional, default: 10
        Number of lags to show; None returns all 2 * len(x) - 1 lags.

    Returns
    -------
    (lags, c, line, b) : where:

      - `lags` is a length-2*maxlags+1 lag vector,
      - `c` is the 2*maxlags+1 autocorrelation vector,
      - `line` is the `~matplotlib.lines.Line2D` returned by `plot`,
      - `b` is the x-axis.

    Other parameters
    -----------------
    linestyle : `~matplotlib.lines.Line2D` prop, optional, default: None
        Only used if usevlines is False.

    marker : string, optional, default: 'o'

    Notes
    -----
    The correlation is performed with :func:`numpy.correlate` with
    `mode` = 2.

    .. plot:: mpl_examples/pylab_examples/xcorr_demo.py
    """
    # Autocorrelation is simply cross-correlation of x with itself.
    return self.xcorr(x, x, **kwargs)
@docstring.dedent_interpd
def xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
          usevlines=True, maxlags=10, **kwargs):
    """
    Plot the cross correlation between *x* and *y*.

    Parameters
    ----------
    x : sequence of scalars of length n

    y : sequence of scalars of length n

    hold : boolean, optional, default: True

    detrend : callable, optional, default: `mlab.detrend_none`
        x is detrended by the `detrend` callable.

    normed : boolean, optional, default: True
        If True, normalize the data by the cross correlation at the
        0-th lag.

    usevlines : boolean, optional, default: True
        If True, Axes.vlines is used to draw the vertical lines from
        the origin to the xcorr; otherwise Axes.plot is used.

    maxlags : integer, optional, default: 10
        Number of lags to show; None returns all 2 * len(x) - 1 lags.

    Returns
    -------
    (lags, c, line, b) : where:

      - `lags` is a length-2*maxlags+1 lag vector,
      - `c` is the 2*maxlags+1 cross correlation vector,
      - `line` is the `~matplotlib.lines.Line2D` returned by `plot`,
      - `b` is the x-axis (none, if plot is used).

    Other parameters
    -----------------
    linestyle : `~matplotlib.lines.Line2D` prop, optional, default: None
        Only used if usevlines is False.

    marker : string, optional, default: 'o'

    Notes
    -----
    The cross correlation is performed with :func:`numpy.correlate`
    with `mode` = 2.

    Raises
    ------
    ValueError
        If *x* and *y* differ in length, or *maxlags* is outside
        ``1 <= maxlags < len(x)``.
    """
    Nx = len(x)
    if Nx != len(y):
        raise ValueError('x and y must be equal length')
    x = detrend(np.asarray(x))
    y = detrend(np.asarray(y))
    c = np.correlate(x, y, mode=2)
    if normed:
        # Out-of-place division: integer input arrays produce an
        # integer correlation, and in-place ``c /= ...`` would raise a
        # casting error instead of returning the normalized floats.
        c = c / np.sqrt(np.dot(x, x) * np.dot(y, y))
    if maxlags is None:
        maxlags = Nx - 1
    if maxlags >= Nx or maxlags < 1:
        # BUG FIX: error message previously misspelled 'maxlags' as
        # 'maglags'.
        raise ValueError('maxlags must be None or strictly '
                         'positive < %d' % Nx)
    # Keep only the central 2*maxlags+1 lags of the full correlation.
    lags = np.arange(-maxlags, maxlags + 1)
    c = c[Nx - 1 - maxlags:Nx + maxlags]
    if usevlines:
        a = self.vlines(lags, [0], c, **kwargs)
        b = self.axhline(**kwargs)
    else:
        kwargs.setdefault('marker', 'o')
        kwargs.setdefault('linestyle', 'None')
        a, = self.plot(lags, c, **kwargs)
        b = None
    return lags, c, a, b
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
    """
    Make a step plot.

    Additional keyword args are the same as those for
    :func:`~matplotlib.pyplot.plot`.

    *x* and *y* must be 1-D sequences, and it is assumed, but not
    checked, that *x* is uniformly increasing.

    Keyword arguments:

    *where*: [ 'pre' | 'post' | 'mid' ]
      If 'pre', the interval from x[i] to x[i+1] has level y[i+1];
      if 'post', that interval has level y[i];
      if 'mid', the jumps in *y* occur half-way between the *x*-values.
    """
    where = kwargs.pop('where', 'pre')
    if where not in ('pre', 'post', 'mid'):
        raise ValueError("'where' argument to step must be "
                         "'pre', 'post' or 'mid'")
    # Encode the step style into the linestyle understood by Line2D,
    # preserving any user-supplied linestyle suffix.
    kwargs['linestyle'] = 'steps-' + where + kwargs.pop('linestyle', '')
    return self.plot(x, y, *args, **kwargs)
@docstring.dedent_interpd
def bar(self, left, height, width=0.8, bottom=None, **kwargs):
"""
Make a bar plot.
Make a bar plot with rectangles bounded by:
`left`, `left` + `width`, `bottom`, `bottom` + `height`
(left, right, bottom and top edges)
Parameters
----------
left : sequence of scalars
the x coordinates of the left sides of the bars
height : sequence of scalars
the heights of the bars
width : scalar or array-like, optional, default: 0.8
the width(s) of the bars
bottom : scalar or array-like, optional, default: None
the y coordinate(s) of the bars
color : scalar or array-like, optional
the colors of the bar faces
edgecolor : scalar or array-like, optional
the colors of the bar edges
linewidth : scalar or array-like, optional, default: None
width of bar edge(s). If None, use default
linewidth; If 0, don't draw edges.
xerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
yerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
ecolor : scalar or array-like, optional, default: None
specifies the color of errorbar(s)
capsize : integer, optional, default: 3
determines the length in points of the error bar caps
error_kw :
dictionary of kwargs to be passed to errorbar method. *ecolor* and
*capsize* may be specified here rather than as independent kwargs.
align : ['edge' | 'center'], optional, default: 'edge'
If `edge`, aligns bars by their left edges (for vertical bars) and
by their bottom edges (for horizontal bars). If `center`, interpret
the `left` argument as the coordinates of the centers of the bars.
orientation : 'vertical' | 'horizontal', optional, default: 'vertical'
The orientation of the bars.
log : boolean, optional, default: False
If true, sets the axis to be log scale
Returns
-------
`matplotlib.patches.Rectangle` instances.
Notes
-----
The optional arguments `color`, `edgecolor`, `linewidth`,
`xerr`, and `yerr` can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: `xerr` and `yerr` are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
See also
--------
barh: Plot a horizontal bar plot.
Examples
--------
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
"""
if not self._hold:
self.cla()
color = kwargs.pop('color', None)
edgecolor = kwargs.pop('edgecolor', None)
linewidth = kwargs.pop('linewidth', None)
# Because xerr and yerr will be passed to errorbar,
# most dimension checking and processing will be left
# to the errorbar method.
xerr = kwargs.pop('xerr', None)
yerr = kwargs.pop('yerr', None)
error_kw = kwargs.pop('error_kw', dict())
ecolor = kwargs.pop('ecolor', None)
capsize = kwargs.pop('capsize', 3)
error_kw.setdefault('ecolor', ecolor)
error_kw.setdefault('capsize', capsize)
align = kwargs.pop('align', 'edge')
orientation = kwargs.pop('orientation', 'vertical')
log = kwargs.pop('log', False)
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log', nonposy='clip')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
adjust_ylim = True
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log', nonposx='clip')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
adjust_xlim = True
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError('invalid orientation: %s' % orientation)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) == 0: # until to_rgba_array is changed
color = [[0, 0, 0, 0]]
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) == 0: # until to_rgba_array is changed
edgecolor = [[0, 0, 0, 0]]
if len(edgecolor) < nbars:
edgecolor *= nbars
# FIXME: convert the following to proper input validation
# raising ValueError; don't use assert for this.
assert len(left) == nbars, ("incompatible sizes: argument 'left' must "
"be length %d or scalar" % nbars)
assert len(height) == nbars, ("incompatible sizes: argument 'height' "
"must be length %d or scalar" %
nbars)
assert len(width) == nbars, ("incompatible sizes: argument 'width' "
"must be length %d or scalar" %
nbars)
assert len(bottom) == nbars, ("incompatible sizes: argument 'bottom' "
"must be length %d or scalar" %
nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
left = self.convert_xunits(left)
width = self.convert_xunits(width)
if xerr is not None:
xerr = self.convert_xunits(xerr)
if self.yaxis is not None:
bottom = self.convert_yunits(bottom)
height = self.convert_yunits(height)
if yerr is not None:
yerr = self.convert_yunits(yerr)
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i] / 2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i] / 2.
for i in xrange(len(bottom))]
else:
raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h < 0:
b += h
h = abs(h)
if w < 0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label='_nolegend_'
)
r.update(kwargs)
r.get_path()._interpolation_steps = 100
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l + 0.5 * w for l, w in zip(left, width)]
y = [b + h for b, h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l + w for l, w in zip(left, width)]
y = [b + 0.5 * h for b, h in zip(bottom, height)]
if "label" not in error_kw:
error_kw["label"] = '_nolegend_'
errorbar = self.errorbar(x, y,
yerr=yerr, xerr=xerr,
fmt='none', **error_kw)
else:
errorbar = None
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin([w for w in width if w > 0])
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin * 0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin([h for h in height if h > 0])
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin * 0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
bar_container = BarContainer(patches, errorbar, label=label)
self.add_container(bar_container)
return bar_container
@docstring.dedent_interpd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
Make a horizontal bar plot.
Make a horizontal bar plot with rectangles bounded by:
`left`, `left` + `width`, `bottom`, `bottom` + `height`
(left, right, bottom and top edges)
`bottom`, `width`, `height`, and `left` can be either scalars
or sequences
Parameters
----------
bottom : scalar or array-like
the y coordinate(s) of the bars
width : scalar or array-like
the width(s) of the bars
height : sequence of scalars, optional, default: 0.8
the heights of the bars
left : sequence of scalars
the x coordinates of the left sides of the bars
Returns
--------
`matplotlib.patches.Rectangle` instances.
Other parameters
----------------
color : scalar or array-like, optional
the colors of the bars
edgecolor : scalar or array-like, optional
the colors of the bar edges
linewidth : scalar or array-like, optional, default: None
width of bar edge(s). If None, use default
linewidth; If 0, don't draw edges.
xerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
yerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
ecolor : scalar or array-like, optional, default: None
specifies the color of errorbar(s)
capsize : integer, optional, default: 3
determines the length in points of the error bar caps
error_kw :
dictionary of kwargs to be passed to errorbar method. `ecolor` and
`capsize` may be specified here rather than as independent kwargs.
align : ['edge' | 'center'], optional, default: 'edge'
If `edge`, aligns bars by their left edges (for vertical bars) and
by their bottom edges (for horizontal bars). If `center`, interpret
the `left` argument as the coordinates of the centers of the bars.
orientation : 'vertical' | 'horizontal', optional, default: 'vertical'
The orientation of the bars.
log : boolean, optional, default: False
If true, sets the axis to be log scale
Notes
-----
The optional arguments `color`, `edgecolor`, `linewidth`,
`xerr`, and `yerr` can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: `xerr` and `yerr` are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
See also
--------
bar: Plot a vertical bar plot.
"""
patches = self.bar(left=left, height=height, width=width,
bottom=bottom, orientation='horizontal', **kwargs)
return patches
@docstring.dedent_interpd
def broken_barh(self, xranges, yrange, **kwargs):
"""
Plot horizontal bars.
Call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, i.e.,::
facecolors = 'black'
or a sequence of arguments for the various bars, i.e.,::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
def stem(self, *args, **kwargs):
"""
Create a stem plot.
Call signatures::
stem(y, linefmt='b-', markerfmt='bo', basefmt='r-')
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
using *markerfmt*. A horizontal line at 0 is is plotted using
*basefmt*.
If no *x* values are provided, the default is (0, 1, ..., len(y) - 1)
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
This
`document <http://www.mathworks.com/help/techdoc/ref/stem.html>`_
for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/stem_plot.py
"""
remember_hold = self._hold
if not self._hold:
self.cla()
self.hold(True)
# Assume there's at least one data array
y = np.asarray(args[0])
args = args[1:]
# Try a second one
try:
second = np.asarray(args[0], dtype=np.float)
x, y = y, second
args = args[1:]
except (IndexError, ValueError):
# The second array doesn't make sense, or it doesn't exist
second = np.arange(len(y))
x = second
# Popping some defaults
try:
linefmt = kwargs.pop('linefmt', args[0])
except IndexError:
linefmt = kwargs.pop('linefmt', 'b-')
try:
markerfmt = kwargs.pop('markerfmt', args[1])
except IndexError:
markerfmt = kwargs.pop('markerfmt', 'bo')
try:
basefmt = kwargs.pop('basefmt', args[2])
except IndexError:
basefmt = kwargs.pop('basefmt', 'r-')
bottom = kwargs.pop('bottom', None)
label = kwargs.pop('label', None)
markerline, = self.plot(x, y, markerfmt, label="_nolegend_")
if bottom is None:
bottom = 0
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx, thisx], [bottom, thisy], linefmt,
label="_nolegend_")
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [bottom, bottom],
basefmt, label="_nolegend_")
self.hold(remember_hold)
stem_container = StemContainer((markerline, stemlines, baseline),
label=label)
self.add_container(stem_container)
return stem_container
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False, labeldistance=1.1,
startangle=None, radius=None, counterclock=True,
wedgeprops=None, textprops=None):
r"""
Plot a pie chart.
Call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1, startangle=None, radius=None,
counterclock=True, wedgeprops=None, textprops=None)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized. The wedges are plotted counterclockwise,
by default starting from the x-axis.
Keyword arguments:
*explode*: [ *None* | len(x) sequence ]
If not *None*, is a ``len(x)`` array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ *None* | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ *None* | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ *None* | format string | format function ]
If not *None*, is a string or function used to label the wedges
with their numeric value. The label will be placed inside the
wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ *False* | *True* ]
Draw a shadow beneath the pie.
*startangle*: [ *None* | Offset angle ]
If not *None*, rotates the start of the pie chart by *angle*
degrees counterclockwise from the x-axis.
*radius*: [ *None* | scalar ]
The radius of the pie, if *radius* is *None* it will be set to 1.
*counterclock*: [ *False* | *True* ]
Specify fractions direction, clockwise or counterclockwise.
*wedgeprops*: [ *None* | dict of key value pairs ]
Dict of arguments passed to the wedge objects making the pie.
For example, you can pass in wedgeprops = { 'linewidth' : 3 }
to set the width of the wedge border lines equal to 3.
For more details, look at the doc/arguments of the wedge object.
By default `clip_on=False`.
*textprops*: [ *None* | dict of key value pairs ]
Dict of arguments to pass to the text objects.
The pie chart will probably look best if the figure and axes are
square, or the Axes aspect is equal. e.g.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
or::
axes(aspect=1)
Return value:
If *autopct* is *None*, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx > 1:
x = np.divide(x, sx)
if labels is None:
labels = [''] * len(x)
if explode is None:
explode = [0] * len(x)
assert(len(x) == len(labels))
assert(len(x) == len(explode))
if colors is None:
colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0, 0
if radius is None:
radius = 1
# Starting theta1 is the start fraction of the circle
if startangle is None:
theta1 = 0
else:
theta1 = startangle / 360.0
# set default values in wedge_prop
if wedgeprops is None:
wedgeprops = {}
if 'clip_on' not in wedgeprops:
wedgeprops['clip_on'] = False
if textprops is None:
textprops = {}
if 'clip_on' not in textprops:
textprops['clip_on'] = False
texts = []
slices = []
autotexts = []
i = 0
for frac, label, expl in cbook.safezip(x, labels, explode):
x, y = center
theta2 = (theta1 + frac) if counterclock else (theta1 - frac)
thetam = 2 * math.pi * 0.5 * (theta1 + theta2)
x += expl * math.cos(thetam)
y += expl * math.sin(thetam)
w = mpatches.Wedge((x, y), radius, 360. * min(theta1, theta2),
360. * max(theta1, theta2),
facecolor=colors[i % len(colors)],
**wedgeprops)
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02)
shad.set_zorder(0.9 * w.get_zorder())
shad.set_label('_nolegend_')
self.add_patch(shad)
xt = x + labeldistance * radius * math.cos(thetam)
yt = y + labeldistance * radius * math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center',
**textprops)
texts.append(t)
if autopct is not None:
xt = x + pctdistance * radius * math.cos(thetam)
yt = y + pctdistance * radius * math.sin(thetam)
if is_string_like(autopct):
s = autopct % (100. * frac)
elif six.callable(autopct):
s = autopct(100. * frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center',
**textprops)
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None:
return slices, texts
else:
return slices, texts, autotexts
@docstring.dedent_interpd
    def errorbar(self, x, y, yerr=None, xerr=None,
                 fmt='', ecolor=None, elinewidth=None, capsize=3,
                 barsabove=False, lolims=False, uplims=False,
                 xlolims=False, xuplims=False, errorevery=1, capthick=None,
                 **kwargs):
        """
        Plot an errorbar graph.
        Call signature::
          errorbar(x, y, yerr=None, xerr=None,
                   fmt='', ecolor=None, elinewidth=None, capsize=3,
                   barsabove=False, lolims=False, uplims=False,
                   xlolims=False, xuplims=False, errorevery=1,
                   capthick=None)
        Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
        Vertical errorbars are plotted if *yerr* is not *None*.
        Horizontal errorbars are plotted if *xerr* is not *None*.
        *x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
        single error bar at *x*, *y*.
        Optional keyword arguments:
        *xerr*/*yerr*: [ scalar | N, Nx1, or 2xN array-like ]
        If a scalar number, len(N) array-like object, or an Nx1
        array-like object, errorbars are drawn at +/-value relative
        to the data.
        If a sequence of shape 2xN, errorbars are drawn at -row1
        and +row2 relative to the data.
        *fmt*: [ '' | 'none' | plot format string ]
        The plot format symbol. If *fmt* is 'none' (case-insensitive),
        only the errorbars are plotted. This is used for adding
        errorbars to a bar plot, for example. Default is '',
        an empty plot format string; properties are
        then identical to the defaults for :meth:`plot`.
        *ecolor*: [ *None* | mpl color ]
        A matplotlib color arg which gives the color the errorbar lines;
        if *None*, use the color of the line connecting the markers.
        *elinewidth*: scalar
        The linewidth of the errorbar lines. If *None*, use the linewidth.
        *capsize*: scalar
        The length of the error bar caps in points
        *capthick*: scalar
        An alias kwarg to *markeredgewidth* (a.k.a. - *mew*). This
        setting is a more sensible name for the property that
        controls the thickness of the error bar cap in points. For
        backwards compatibility, if *mew* or *markeredgewidth* are given,
        then they will over-ride *capthick*. This may change in future
        releases.
        *barsabove*: [ *True* | *False* ]
        if *True*, will plot the errorbars above the plot
        symbols. Default is below.
        *lolims* / *uplims* / *xlolims* / *xuplims*: [ *False* | *True* ]
        These arguments can be used to indicate that a value gives
        only upper/lower limits. In that case a caret symbol is
        used to indicate this. lims-arguments may be of the same
        type as *xerr* and *yerr*. To use limits with inverted
        axes, :meth:`set_xlim` or :meth:`set_ylim` must be called
        before :meth:`errorbar`.
        *errorevery*: positive integer
        subsamples the errorbars. e.g., if everyerror=5, errorbars for
        every 5-th datapoint will be plotted. The data plot itself still
        shows all data points.
        All other keyword arguments are passed on to the plot command for the
        markers. For example, this code makes big red squares with
        thick green edges::
          x,y,yerr = rand(3,10)
          errorbar(x, y, yerr, marker='s',
                   mfc='red', mec='green', ms=20, mew=4)
        where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
        property names, *markerfacecolor*, *markeredgecolor*, *markersize*
        and *markeredgewith*.
        valid kwargs for the marker properties are
        %(Line2D)s
        Returns (*plotline*, *caplines*, *barlinecols*):
        *plotline*: :class:`~matplotlib.lines.Line2D` instance
        *x*, *y* plot markers and/or line
        *caplines*: list of error bar cap
        :class:`~matplotlib.lines.Line2D` instances
        *barlinecols*: list of
        :class:`~matplotlib.collections.LineCollection` instances for
        the horizontal and vertical error ranges.
        **Example:**
        .. plot:: mpl_examples/statistics/errorbar_demo.py
        """
        # Validate the subsampling factor before doing any work.
        if errorevery < 1:
            raise ValueError(
                'errorevery has to be a strictly positive integer')
        self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
        if not self._hold:
            self.cla()
        # Hold is forced on while the errorbar artists are assembled and
        # restored to its previous state before returning.
        holdstate = self._hold
        self._hold = True
        if fmt is None:
            # Legacy spelling: fmt=None used to suppress the data line.
            fmt = 'none'
            msg = ('Use of None object as fmt keyword argument to '
                   + 'suppress plotting of data values is deprecated '
                   + 'since 1.4; use the string "none" instead.')
            warnings.warn(msg, mplDeprecation, stacklevel=1)
        # fmt 'none' (case-insensitive) draws only the errorbars.
        plot_line = (fmt.lower() != 'none')
        label = kwargs.pop("label", None)
        # make sure all the args are iterable; use lists not arrays to
        # preserve units
        if not iterable(x):
            x = [x]
        if not iterable(y):
            y = [y]
        if xerr is not None:
            if not iterable(xerr):
                xerr = [xerr] * len(x)
        if yerr is not None:
            if not iterable(yerr):
                yerr = [yerr] * len(y)
        l0 = None
        # Instead of using zorder, the line plot is being added
        # either here, or after all the errorbar plot elements.
        if barsabove and plot_line:
            l0, = self.plot(x, y, fmt, label="_nolegend_", **kwargs)
        barcols = []
        caplines = []
        # Keyword arguments applied to the hlines/vlines error ranges.
        lines_kw = {'label': '_nolegend_'}
        if elinewidth:
            lines_kw['linewidth'] = elinewidth
        else:
            for key in ('linewidth', 'lw'):
                if key in kwargs:
                    lines_kw[key] = kwargs[key]
        for key in ('transform', 'alpha', 'zorder'):
            if key in kwargs:
                lines_kw[key] = kwargs[key]
        # arrays fine here, they are booleans and hence not units
        if not iterable(lolims):
            lolims = np.asarray([lolims] * len(x), bool)
        else:
            lolims = np.asarray(lolims, bool)
        if not iterable(uplims):
            uplims = np.array([uplims] * len(x), bool)
        else:
            uplims = np.asarray(uplims, bool)
        if not iterable(xlolims):
            xlolims = np.array([xlolims] * len(x), bool)
        else:
            xlolims = np.asarray(xlolims, bool)
        if not iterable(xuplims):
            xuplims = np.array([xuplims] * len(x), bool)
        else:
            xuplims = np.asarray(xuplims, bool)
        # Mask selecting every errorevery-th data point for errorbars.
        everymask = np.arange(len(x)) % errorevery == 0

        def xywhere(xs, ys, mask):
            """
            return xs[mask], ys[mask] where mask is True but xs and
            ys are not arrays
            """
            assert len(xs) == len(ys)
            assert len(xs) == len(mask)
            xs = [thisx for thisx, b in zip(xs, mask) if b]
            ys = [thisy for thisy, b in zip(ys, mask) if b]
            return xs, ys
        # Keyword arguments applied to the cap/limit marker plots.
        plot_kw = {'label': '_nolegend_'}
        if capsize > 0:
            plot_kw['ms'] = 2. * capsize
        if capthick is not None:
            # 'mew' has higher priority, I believe,
            # if both 'mew' and 'markeredgewidth' exists.
            # So, save capthick to markeredgewidth so that
            # explicitly setting mew or markeredgewidth will
            # over-write capthick.
            plot_kw['markeredgewidth'] = capthick
        # For backwards-compat, allow explicit setting of
        # 'mew' or 'markeredgewidth' to over-ride capthick.
        for key in ('markeredgewidth', 'mew', 'transform', 'alpha', 'zorder'):
            if key in kwargs:
                plot_kw[key] = kwargs[key]
        # ---- horizontal error bars (xerr) ----
        if xerr is not None:
            if (iterable(xerr) and len(xerr) == 2 and
                    iterable(xerr[0]) and iterable(xerr[1])):
                # using list comps rather than arrays to preserve units
                left = [thisx - thiserr for (thisx, thiserr)
                        in cbook.safezip(x, xerr[0])]
                right = [thisx + thiserr for (thisx, thiserr)
                         in cbook.safezip(x, xerr[1])]
            else:
                # using list comps rather than arrays to preserve units
                left = [thisx - thiserr for (thisx, thiserr)
                        in cbook.safezip(x, xerr)]
                right = [thisx + thiserr for (thisx, thiserr)
                         in cbook.safezip(x, xerr)]
            # select points without upper/lower limits in x and
            # draw normal errorbars for these points
            noxlims = ~(xlolims | xuplims)
            if noxlims.any():
                yo, _ = xywhere(y, right, noxlims & everymask)
                lo, ro = xywhere(left, right, noxlims & everymask)
                barcols.append(self.hlines(yo, lo, ro, **lines_kw))
                if capsize > 0:
                    caplines.extend(self.plot(lo, yo, 'k|', **plot_kw))
                    caplines.extend(self.plot(ro, yo, 'k|', **plot_kw))
            if xlolims.any():
                # Lower-limit-only points get a caret pointing away from
                # the limit; direction flips on an inverted axis.
                yo, _ = xywhere(y, right, xlolims & everymask)
                lo, ro = xywhere(x, right, xlolims & everymask)
                barcols.append(self.hlines(yo, lo, ro, **lines_kw))
                rightup, yup = xywhere(right, y, xlolims & everymask)
                if self.xaxis_inverted():
                    marker = mlines.CARETLEFT
                else:
                    marker = mlines.CARETRIGHT
                caplines.extend(
                    self.plot(rightup, yup, ls='None', marker=marker,
                              **plot_kw))
                if capsize > 0:
                    xlo, ylo = xywhere(x, y, xlolims & everymask)
                    caplines.extend(self.plot(xlo, ylo, 'k|', **plot_kw))
            if xuplims.any():
                yo, _ = xywhere(y, right, xuplims & everymask)
                lo, ro = xywhere(left, x, xuplims & everymask)
                barcols.append(self.hlines(yo, lo, ro, **lines_kw))
                leftlo, ylo = xywhere(left, y, xuplims & everymask)
                if self.xaxis_inverted():
                    marker = mlines.CARETRIGHT
                else:
                    marker = mlines.CARETLEFT
                caplines.extend(
                    self.plot(leftlo, ylo, ls='None', marker=marker,
                              **plot_kw))
                if capsize > 0:
                    xup, yup = xywhere(x, y, xuplims & everymask)
                    caplines.extend(self.plot(xup, yup, 'k|', **plot_kw))
        # ---- vertical error bars (yerr) ----
        if yerr is not None:
            if (iterable(yerr) and len(yerr) == 2 and
                    iterable(yerr[0]) and iterable(yerr[1])):
                # using list comps rather than arrays to preserve units
                lower = [thisy - thiserr for (thisy, thiserr)
                         in cbook.safezip(y, yerr[0])]
                upper = [thisy + thiserr for (thisy, thiserr)
                         in cbook.safezip(y, yerr[1])]
            else:
                # using list comps rather than arrays to preserve units
                lower = [thisy - thiserr for (thisy, thiserr)
                         in cbook.safezip(y, yerr)]
                upper = [thisy + thiserr for (thisy, thiserr)
                         in cbook.safezip(y, yerr)]
            # select points without upper/lower limits in y and
            # draw normal errorbars for these points
            noylims = ~(lolims | uplims)
            if noylims.any():
                xo, _ = xywhere(x, lower, noylims & everymask)
                lo, uo = xywhere(lower, upper, noylims & everymask)
                barcols.append(self.vlines(xo, lo, uo, **lines_kw))
                if capsize > 0:
                    caplines.extend(self.plot(xo, lo, 'k_', **plot_kw))
                    caplines.extend(self.plot(xo, uo, 'k_', **plot_kw))
            if lolims.any():
                xo, _ = xywhere(x, lower, lolims & everymask)
                lo, uo = xywhere(y, upper, lolims & everymask)
                barcols.append(self.vlines(xo, lo, uo, **lines_kw))
                xup, upperup = xywhere(x, upper, lolims & everymask)
                if self.yaxis_inverted():
                    marker = mlines.CARETDOWN
                else:
                    marker = mlines.CARETUP
                caplines.extend(
                    self.plot(xup, upperup, ls='None', marker=marker,
                              **plot_kw))
                if capsize > 0:
                    xlo, ylo = xywhere(x, y, lolims & everymask)
                    caplines.extend(self.plot(xlo, ylo, 'k_', **plot_kw))
            if uplims.any():
                xo, _ = xywhere(x, lower, uplims & everymask)
                lo, uo = xywhere(lower, y, uplims & everymask)
                barcols.append(self.vlines(xo, lo, uo, **lines_kw))
                xlo, lowerlo = xywhere(x, lower, uplims & everymask)
                if self.yaxis_inverted():
                    marker = mlines.CARETUP
                else:
                    marker = mlines.CARETDOWN
                caplines.extend(
                    self.plot(xlo, lowerlo, ls='None', marker=marker,
                              **plot_kw))
                if capsize > 0:
                    xup, yup = xywhere(x, y, uplims & everymask)
                    caplines.extend(self.plot(xup, yup, 'k_', **plot_kw))
        if not barsabove and plot_line:
            l0, = self.plot(x, y, fmt, **kwargs)
        # Default errorbar color: follow the data line if one was drawn,
        # otherwise take the next color from the line color cycle.
        if ecolor is None:
            if l0 is None:
                ecolor = six.next(self._get_lines.color_cycle)
            else:
                ecolor = l0.get_color()
        for l in barcols:
            l.set_color(ecolor)
        for l in caplines:
            l.set_color(ecolor)
        self.autoscale_view()
        self._hold = holdstate
        errorbar_container = ErrorbarContainer((l0, tuple(caplines),
                                                tuple(barcols)),
                                               has_xerr=(xerr is not None),
                                               has_yerr=(yerr is not None),
                                               label=label)
        self.containers.append(errorbar_container)
        return errorbar_container  # (l0, caplines, barcols)
def boxplot(self, x, notch=False, sym=None, vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None,
meanline=False, showmeans=False, showcaps=True,
showbox=True, showfliers=True, boxprops=None, labels=None,
flierprops=None, medianprops=None, meanprops=None,
capprops=None, whiskerprops=None, manage_xticks=True):
"""
Make a box and whisker plot.
Call signature::
boxplot(self, x, notch=False, sym='b+', vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None,
meanline=False, showmeans=False, showcaps=True,
showbox=True, showfliers=True, boxprops=None, labels=None,
flierprops=None, medianprops=None, meanprops=None,
capprops=None, whiskerprops=None, manage_xticks=True):
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Parameters
----------
x : Array or a sequence of vectors.
The input data.
notch : bool, default = False
If False, produces a rectangular box plot.
If True, will produce a notched box plot
sym : str or None, default = None
The default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
If `None`, then the fliers default to 'b+' If you want more
control use the flierprops kwarg.
vert : bool, default = True
If True (default), makes the boxes vertical.
If False, makes horizontal boxes.
whis : float, sequence (default = 1.5) or string
As a float, determines the reach of the whiskers past the first
and third quartiles (e.g., Q3 + whis*IQR, IQR = interquartile
range, Q3-Q1). Beyond the whiskers, data are considered outliers
and are plotted as individual points. Set this to an unreasonably
high value to force the whiskers to show the min and max values.
Alternatively, set this to an ascending sequence of percentile
(e.g., [5, 95]) to set the whiskers at specific percentiles of
the data. Finally, *whis* can be the string 'range' to force the
whiskers to the min and max of the data. In the edge case that
the 25th and 75th percentiles are equivalent, *whis* will be
automatically set to 'range'.
bootstrap : None (default) or integer
Specifies whether to bootstrap the confidence intervals
around the median for notched boxplots. If bootstrap==None,
no bootstrapping is performed, and notches are calculated
using a Gaussian-based asymptotic approximation (see McGill, R.,
Tukey, J.W., and Larsen, W.A., 1978, and Kendall and Stuart,
1967). Otherwise, bootstrap specifies the number of times to
bootstrap the median to determine it's 95% confidence intervals.
Values between 1000 and 10000 are recommended.
usermedians : array-like or None (default)
An array or sequence whose first dimension (or length) is
compatible with *x*. This overrides the medians computed by
matplotlib for each element of *usermedians* that is not None.
When an element of *usermedians* == None, the median will be
computed by matplotlib as normal.
conf_intervals : array-like or None (default)
Array or sequence whose first dimension (or length) is compatible
with *x* and whose second dimension is 2. When the current element
of *conf_intervals* is not None, the notch locations computed by
matplotlib are overridden (assuming notch is True). When an
element of *conf_intervals* is None, boxplot compute notches the
method specified by the other kwargs (e.g., *bootstrap*).
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the boxes. The ticks and limits
are automatically set to match the positions.
widths : array-like, default = 0.5
Either a scalar or a vector and sets the width of each box. The
default is 0.5, or ``0.15*(distance between extreme positions)``
if that is smaller.
labels : sequence or None (default)
Labels for each dataset. Length must be compatible with
dimensions of *x*
patch_artist : bool, default = False
If False produces boxes with the Line2D artist
If True produces boxes with the Patch artist
showmeans : bool, default = False
If True, will toggle one the rendering of the means
showcaps : bool, default = True
If True, will toggle one the rendering of the caps
showbox : bool, default = True
If True, will toggle one the rendering of box
showfliers : bool, default = True
If True, will toggle one the rendering of the fliers
boxprops : dict or None (default)
If provided, will set the plotting style of the boxes
whiskerprops : dict or None (default)
If provided, will set the plotting style of the whiskers
capprops : dict or None (default)
If provided, will set the plotting style of the caps
flierprops : dict or None (default)
If provided, will set the plotting style of the fliers
medianprops : dict or None (default)
If provided, will set the plotting style of the medians
meanprops : dict or None (default)
If provided, will set the plotting style of the means
meanline : bool, default = False
If True (and *showmeans* is True), will try to render the mean
as a line spanning the full width of the box according to
*meanprops*. Not recommended if *shownotches* is also True.
Otherwise, means will be shown as points.
Returns
-------
result : dict
A dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created. That dictionary has the following keys
(assuming vertical boxplots):
- boxes: the main body of the boxplot showing the quartiles
and the median's confidence intervals if enabled.
- medians: horizonal lines at the median of each box.
- whiskers: the vertical lines extending to the most extreme,
n-outlier data points.
- caps: the horizontal lines at the ends of the whiskers.
- fliers: points representing data that extend beyond the
whiskers (outliers).
- means: points or lines representing the means.
Examples
--------
.. plot:: mpl_examples/statistics/boxplot_demo.py
"""
bxpstats = cbook.boxplot_stats(x, whis=whis, bootstrap=bootstrap,
labels=labels)
# make sure we have a dictionary
if flierprops is None:
flierprops = dict()
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False' but none-the-less deal with it to keep back
# compatibility
if sym == '':
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle='none', marker='',
color='none')
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops['marker'] = marker
# if we have a color, use it
if color is not None:
# assume that if color is passed in the user want
# filled symbol, if the users want more control use
# flierprops
flierprops['color'] = color
# replace medians if necessary:
if usermedians is not None:
if (len(np.ravel(usermedians)) != len(bxpstats) or
np.shape(usermedians)[0] != len(bxpstats)):
medmsg = 'usermedians length not compatible with x'
raise ValueError(medmsg)
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats['med'] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
raise ValueError('conf_intervals length not '
'compatible with x')
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError('each confidence interval must '
'have two values')
else:
if ci[0] is not None:
stats['cilo'] = ci[0]
if ci[1] is not None:
stats['cihi'] = ci[1]
artists = self.bxp(bxpstats, positions=positions, widths=widths,
vert=vert, patch_artist=patch_artist,
shownotches=notch, showmeans=showmeans,
showcaps=showcaps, showbox=showbox,
boxprops=boxprops, flierprops=flierprops,
medianprops=medianprops, meanprops=meanprops,
meanline=meanline, showfliers=showfliers,
capprops=capprops, whiskerprops=whiskerprops,
manage_xticks=manage_xticks)
return artists
    def bxp(self, bxpstats, positions=None, widths=None, vert=True,
            patch_artist=False, shownotches=False, showmeans=False,
            showcaps=True, showbox=True, showfliers=True,
            boxprops=None, whiskerprops=None, flierprops=None,
            medianprops=None, capprops=None, meanprops=None,
            meanline=False, manage_xticks=True):
        """
        Drawing function for box and whisker plots.
        Call signature::
          bxp(self, bxpstats, positions=None, widths=None, vert=True,
              patch_artist=False, shownotches=False, showmeans=False,
              showcaps=True, showbox=True, showfliers=True,
              boxprops=None, whiskerprops=None, flierprops=None,
              medianprops=None, capprops=None, meanprops=None,
              meanline=False, manage_xticks=True):
        Make a box and whisker plot for each column of *x* or each
        vector in sequence *x*.  The box extends from the lower to
        upper quartile values of the data, with a line at the median.
        The whiskers extend from the box to show the range of the
        data.  Flier points are those past the end of the whiskers.
        Parameters
        ----------
        bxpstats : list of dicts
          A list of dictionaries containing stats for each boxplot.
          Required keys are:
          - ``med``: The median (scalar float).
          - ``q1``: The first quartile (25th percentile) (scalar
            float).
          - ``q3``: The third quartile (75th percentile) (scalar
            float).
          - ``whislo``: Lower bound of the lower whisker (scalar
            float).
          - ``whishi``: Upper bound of the upper whisker (scalar
            float).
          Optional keys are:
          - ``mean``: The mean (scalar float). Needed if
            ``showmeans=True``.
          - ``fliers``: Data beyond the whiskers (sequence of floats).
            Needed if ``showfliers=True``.
          - ``cilo`` & ``cihi``: Lower and upper confidence intervals
            about the median. Needed if ``shownotches=True``.
          - ``label``: Name of the dataset (string). If available,
            this will be used a tick label for the boxplot
        positions : array-like, default = [1, 2, ..., n]
          Sets the positions of the boxes. The ticks and limits
          are automatically set to match the positions.
        widths : array-like, default = 0.5
          Either a scalar or a vector and sets the width of each
          box. The default is 0.5, or ``0.15*(distance between extreme
          positions)`` if that is smaller.
        vert : bool, default = False
          If `True` (default), makes the boxes vertical.  If `False`,
          makes horizontal boxes.
        patch_artist : bool, default = False
          If `False` produces boxes with the
          `~matplotlib.lines.Line2D` artist.  If `True` produces boxes
          with the `~matplotlib.patches.Patch` artist.
        shownotches : bool, default = False
          If `False` (default), produces a rectangular box plot.
          If `True`, will produce a notched box plot
        showmeans : bool, default = False
          If `True`, will toggle on the rendering of the means
        showcaps  : bool, default = True
          If `True`, will toggle on the rendering of the caps
        showbox  : bool, default = True
          If `True`, will toggle on the rendering of box
        showfliers : bool, default = True
          If `True`, will toggle on the rendering of the fliers
        boxprops : dict or None (default)
          If provided, will set the plotting style of the boxes
        whiskerprops : dict or None (default)
          If provided, will set the plotting style of the whiskers
        capprops : dict or None (default)
          If provided, will set the plotting style of the caps
        flierprops : dict or None (default)
          If provided will set the plotting style of the fliers
        medianprops : dict or None (default)
          If provided, will set the plotting style of the medians
        meanprops : dict or None (default)
          If provided, will set the plotting style of the means
        meanline : bool, default = False
          If `True` (and *showmeans* is `True`), will try to render the mean
          as a line spanning the full width of the box according to
          *meanprops*. Not recommended if *shownotches* is also True.
          Otherwise, means will be shown as points.
        manage_xticks : bool, default = True
          If the function should adjust the xlim and xtick locations.
        Returns
        -------
        result : dict
          A dictionary mapping each component of the boxplot to a list
          of the :class:`matplotlib.lines.Line2D` instances
          created. That dictionary has the following keys (assuming
          vertical boxplots):
          - ``boxes``: the main body of the boxplot showing the
            quartiles and the median's confidence intervals if
            enabled.
          - ``medians``: horizontal lines at the median of each box.
          - ``whiskers``: the vertical lines extending to the most
            extreme, n-outlier data points.
          - ``caps``: the horizontal lines at the ends of the
            whiskers.
          - ``fliers``: points representing data that extend beyond
            the whiskers (fliers).
          - ``means``: points or lines representing the means.
        Examples
        --------
        .. plot:: mpl_examples/statistics/bxp_demo.py
        """
        # Lists of artists created below; these become the values of the
        # returned dictionary.
        whiskers = []
        caps = []
        boxes = []
        medians = []
        means = []
        fliers = []
        # empty list of xticklabels
        datalabels = []
        # translates between line2D and patch linestyles
        # NOTE(review): this mapping is never read anywhere below --
        # apparently dead code left over from a refactor; verify before
        # removing.
        linestyle_map = {
            'solid': '-',
            'dashed': '--',
            'dashdot': '-.',
            'dotted': ':'
        }
        # box properties: defaults depend on whether boxes are drawn as
        # Patch artists (filled) or Line2D artists (outline only); user
        # overrides from *boxprops* are merged on top.
        if patch_artist:
            final_boxprops = dict(linestyle='solid', edgecolor='black',
                                  facecolor='white', linewidth=1)
        else:
            final_boxprops = dict(linestyle='-', color='blue')
        if boxprops is not None:
            final_boxprops.update(boxprops)
        # other (cap, whisker) properties
        final_whiskerprops = dict(
            linestyle='--',
            color='blue',
        )
        final_capprops = dict(
            linestyle='-',
            color='black',
        )
        if capprops is not None:
            final_capprops.update(capprops)
        if whiskerprops is not None:
            final_whiskerprops.update(whiskerprops)
        # set up the default flier properties
        final_flierprops = dict(linestyle='none', marker='+', color='blue')
        # flier (outlier) properties
        if flierprops is not None:
            final_flierprops.update(flierprops)
        # median line properties
        final_medianprops = dict(linestyle='-', color='red')
        if medianprops is not None:
            final_medianprops.update(medianprops)
        # mean (line or point) properties: a dashed line when *meanline*
        # is requested, otherwise a filled square marker
        if meanline:
            final_meanprops = dict(linestyle='--', color='black')
        else:
            final_meanprops = dict(linestyle='none', markerfacecolor='red',
                                   marker='s')
        if meanprops is not None:
            final_meanprops.update(meanprops)

        def to_vc(xs, ys):
            # convert arguments to verts and codes: one MOVETO, then
            # LINETOs, then a CLOSEPOLY whose (0, 0) vertex is ignored
            verts = []
            #codes = []
            for xi, yi in zip(xs, ys):
                verts.append((xi, yi))
            verts.append((0, 0))  # ignored
            codes = [mpath.Path.MOVETO] + \
                    [mpath.Path.LINETO] * (len(verts) - 2) + \
                    [mpath.Path.CLOSEPOLY]
            return verts, codes

        def patch_list(xs, ys, **kwargs):
            # build a closed PathPatch from the coords, add it to the
            # axes, and return it in a one-element list (so callers can
            # uniformly .extend() the result, matching self.plot's API)
            verts, codes = to_vc(xs, ys)
            path = mpath.Path(verts, codes)
            patch = mpatches.PathPatch(path, **kwargs)
            self.add_artist(patch)
            return [patch]

        # vertical or horizontal plot?  The horizontal variants simply
        # swap the x and y arguments before drawing.
        if vert:
            def doplot(*args, **kwargs):
                return self.plot(*args, **kwargs)

            def dopatch(xs, ys, **kwargs):
                return patch_list(xs, ys, **kwargs)
        else:
            def doplot(*args, **kwargs):
                # swap each (x, y) pair in the positional args
                shuffled = []
                for i in xrange(0, len(args), 2):
                    shuffled.extend([args[i + 1], args[i]])
                return self.plot(*shuffled, **kwargs)

            def dopatch(xs, ys, **kwargs):
                xs, ys = ys, xs  # flip X, Y
                return patch_list(xs, ys, **kwargs)

        # input validation
        N = len(bxpstats)
        datashape_message = ("List of boxplot statistics and `{0}` "
                             "values must have same the length")
        # check position
        if positions is None:
            positions = list(xrange(1, N + 1))
        elif len(positions) != N:
            raise ValueError(datashape_message.format("positions"))

        # width: scalar widths are broadcast to one value per box
        if widths is None:
            distance = max(positions) - min(positions)
            widths = [min(0.15 * max(distance, 1.0), 0.5)] * N
        elif np.isscalar(widths):
            widths = [widths] * N
        elif len(widths) != N:
            raise ValueError(datashape_message.format("widths"))

        # check and save the `hold` state of the current axes
        if not self._hold:
            self.cla()
        holdStatus = self._hold

        # Draw one boxplot per stats dict.
        for pos, width, stats in zip(positions, widths, bxpstats):
            # try to find a new label; fall back to the position itself
            datalabels.append(stats.get('label', pos))
            # fliers coords
            flier_x = np.ones(len(stats['fliers'])) * pos
            flier_y = stats['fliers']
            # whisker coords
            whisker_x = np.ones(2) * pos
            whiskerlo_y = np.array([stats['q1'], stats['whislo']])
            whiskerhi_y = np.array([stats['q3'], stats['whishi']])
            # cap coords (caps are half the box width)
            cap_left = pos - width * 0.25
            cap_right = pos + width * 0.25
            cap_x = np.array([cap_left, cap_right])
            cap_lo = np.ones(2) * stats['whislo']
            cap_hi = np.ones(2) * stats['whishi']
            # box and median coords
            box_left = pos - width * 0.5
            box_right = pos + width * 0.5
            med_y = [stats['med'], stats['med']]
            # notched boxes: the outline pinches in to cap width at the
            # median, using the cilo/cihi confidence bounds
            if shownotches:
                box_x = [box_left, box_right, box_right, cap_right, box_right,
                         box_right, box_left, box_left, cap_left, box_left,
                         box_left]
                box_y = [stats['q1'], stats['q1'], stats['cilo'],
                         stats['med'], stats['cihi'], stats['q3'],
                         stats['q3'], stats['cihi'], stats['med'],
                         stats['cilo'], stats['q1']]
                med_x = cap_x
            # plain boxes
            else:
                box_x = [box_left, box_right, box_right, box_left, box_left]
                box_y = [stats['q1'], stats['q1'], stats['q3'], stats['q3'],
                         stats['q1']]
                med_x = [box_left, box_right]
            # maybe draw the box:
            if showbox:
                if patch_artist:
                    boxes.extend(dopatch(box_x, box_y, **final_boxprops))
                else:
                    boxes.extend(doplot(box_x, box_y, **final_boxprops))
            # draw the whiskers
            whiskers.extend(doplot(
                whisker_x, whiskerlo_y, **final_whiskerprops
            ))
            whiskers.extend(doplot(
                whisker_x, whiskerhi_y, **final_whiskerprops
            ))
            # maybe draw the caps:
            if showcaps:
                caps.extend(doplot(cap_x, cap_lo, **final_capprops))
                caps.extend(doplot(cap_x, cap_hi, **final_capprops))
            # draw the medians
            medians.extend(doplot(med_x, med_y, **final_medianprops))
            # maybe draw the means
            if showmeans:
                if meanline:
                    means.extend(doplot(
                        [box_left, box_right], [stats['mean'], stats['mean']],
                        **final_meanprops
                    ))
                else:
                    means.extend(doplot(
                        [pos], [stats['mean']], **final_meanprops
                    ))
            # maybe draw the fliers
            if showfliers:
                fliers.extend(doplot(
                    flier_x, flier_y, **final_flierprops
                ))

        # fix our axes/ticks up a little: pick the axis matching the
        # box orientation
        if vert:
            setticks = self.set_xticks
            setlim = self.set_xlim
            setlabels = self.set_xticklabels
        else:
            setticks = self.set_yticks
            setlim = self.set_ylim
            setlabels = self.set_yticklabels

        if manage_xticks:
            newlimits = min(positions) - 0.5, max(positions) + 0.5
            setlim(newlimits)
            setticks(positions)
            setlabels(datalabels)

        # reset hold status
        self.hold(holdStatus)

        return dict(whiskers=whiskers, caps=caps, boxes=boxes,
                    medians=medians, fliers=fliers, means=means)
    @docstring.dedent_interpd
    def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
                vmin=None, vmax=None, alpha=None, linewidths=None,
                verts=None, **kwargs):
        """
        Make a scatter plot of x vs y, where x and y are sequence like objects
        of the same lengths.
        Parameters
        ----------
        x, y : array_like, shape (n, )
            Input data
        s : scalar or array_like, shape (n, ), optional, default: 20
            size in points^2.
        c : color or sequence of color, optional, default : 'b'
            `c` can be a single color format string, or a sequence of color
            specifications of length `N`, or a sequence of `N` numbers to be
            mapped to colors using the `cmap` and `norm` specified via kwargs
            (see below). Note that `c` should not be a single numeric RGB or
            RGBA sequence because that is indistinguishable from an array of
            values to be colormapped.  `c` can be a 2-D array in which the
            rows are RGB or RGBA, however.
        marker : `~matplotlib.markers.MarkerStyle`, optional, default: 'o'
            See `~matplotlib.markers` for more information on the different
            styles of markers scatter supports.
        cmap : `~matplotlib.colors.Colormap`, optional, default: None
            A `~matplotlib.colors.Colormap` instance or registered name.
            `cmap` is only used if `c` is an array of floats. If None,
            defaults to rc `image.cmap`.
        norm : `~matplotlib.colors.Normalize`, optional, default: None
            A `~matplotlib.colors.Normalize` instance is used to scale
            luminance data to 0, 1. `norm` is only used if `c` is an array of
            floats. If `None`, use the default :func:`normalize`.
        vmin, vmax : scalar, optional, default: None
            `vmin` and `vmax` are used in conjunction with `norm` to normalize
            luminance data.  If either are `None`, the min and max of the
            color array is used.  Note if you pass a `norm` instance, your
            settings for `vmin` and `vmax` will be ignored.
        alpha : scalar, optional, default: None
            The alpha blending value, between 0 (transparent) and 1 (opaque)
        linewidths : scalar or array_like, optional, default: None
            If None, defaults to (lines.linewidth,).  Note that this is a
            tuple, and if you set the linewidths argument you must set it as a
            sequence of floats, as required by
            `~matplotlib.collections.RegularPolyCollection`.
        Returns
        -------
        paths : `~matplotlib.collections.PathCollection`
        Other parameters
        ----------------
        kwargs : `~matplotlib.collections.Collection` properties
        Notes
        ------
        Any or all of `x`, `y`, `s`, and `c` may be masked arrays, in
        which case all masks will be combined and only unmasked points
        will be plotted.
        Examples
        --------
        .. plot:: mpl_examples/shapes_and_collections/scatter_demo.py
        """
        # If hold is off, start from a cleared axes.
        if not self._hold:
            self.cla()

        # Unit handling must happen before masking/ravelling so that
        # unit-carrying inputs are converted to plain values first.
        self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
        x = self.convert_xunits(x)
        y = self.convert_yunits(y)

        # np.ma.ravel yields an ndarray, not a masked array,
        # unless its argument is a masked array.
        x = np.ma.ravel(x)
        y = np.ma.ravel(y)
        if x.size != y.size:
            raise ValueError("x and y must be the same size")

        s = np.ma.ravel(s)  # This doesn't have to match x, y in size.

        # NOTE(review): is_string_like / is_sequence_of_strings come from
        # module scope (not visible in this chunk); they decide whether
        # *c* is color spec(s) or data to be colormapped.
        c_is_stringy = is_string_like(c) or is_sequence_of_strings(c)
        if not c_is_stringy:
            c = np.asanyarray(c)
            if c.size == x.size:
                c = np.ma.ravel(c)

        # Combine all masks and drop masked points from every array.
        x, y, s, c = cbook.delete_masked_points(x, y, s, c)

        scales = s  # Renamed for readability below.

        if c_is_stringy:
            colors = mcolors.colorConverter.to_rgba_array(c, alpha)
        else:
            # The inherent ambiguity is resolved in favor of color
            # mapping, not interpretation as rgb or rgba:
            if c.size == x.size:
                colors = None  # use cmap, norm after collection is created
            else:
                colors = mcolors.colorConverter.to_rgba_array(c, alpha)

        # Deprecated 'faceted' option is translated to edgecolors; the
        # pop() must precede collection creation so it never reaches the
        # Collection kwargs.
        faceted = kwargs.pop('faceted', None)
        edgecolors = kwargs.get('edgecolors', None)
        if faceted is not None:
            cbook.warn_deprecated(
                '1.2', name='faceted', alternative='edgecolor',
                obj_type='option')
            if faceted:
                edgecolors = None
            else:
                edgecolors = 'none'

        # to be API compatible
        if marker is None and not (verts is None):
            marker = (verts, 0)
            verts = None

        marker_obj = mmarkers.MarkerStyle(marker)
        path = marker_obj.get_path().transformed(
            marker_obj.get_transform())
        if not marker_obj.is_filled():
            edgecolors = 'face'

        offsets = np.dstack((x, y))

        collection = mcoll.PathCollection(
                (path,), scales,
                facecolors=colors,
                edgecolors=edgecolors,
                linewidths=linewidths,
                offsets=offsets,
                transOffset=kwargs.pop('transform', self.transData),
                )
        collection.set_transform(mtransforms.IdentityTransform())
        # set_alpha before update(kwargs) so an explicit 'alpha' kwarg
        # can still override it
        collection.set_alpha(alpha)
        collection.update(kwargs)

        # colors is None means *c* holds data values to be colormapped.
        if colors is None:
            if norm is not None:
                assert(isinstance(norm, mcolors.Normalize))
            collection.set_array(np.asarray(c))
            collection.set_cmap(cmap)
            collection.set_norm(norm)

            if vmin is not None or vmax is not None:
                collection.set_clim(vmin, vmax)
            else:
                collection.autoscale_None()

        # The margin adjustment is a hack to deal with the fact that we don't
        # want to transform all the symbols whose scales are in points
        # to data coords to get the exact bounding box for efficiency
        # reasons.  It can be done right if this is deemed important.
        # Also, only bother with this padding if there is anything to draw.
        if self._xmargin < 0.05 and x.size > 0:
            self.set_xmargin(0.05)

        if self._ymargin < 0.05 and x.size > 0:
            self.set_ymargin(0.05)

        self.add_collection(collection)
        self.autoscale_view()

        return collection
    @docstring.dedent_interpd
    def hexbin(self, x, y, C=None, gridsize=100, bins=None,
               xscale='linear', yscale='linear', extent=None,
               cmap=None, norm=None, vmin=None, vmax=None,
               alpha=None, linewidths=None, edgecolors='none',
               reduce_C_function=np.mean, mincnt=None, marginals=False,
               **kwargs):
        """
        Make a hexagonal binning plot.
        Call signature::
          hexbin(x, y, C = None, gridsize = 100, bins = None,
                 xscale = 'linear', yscale = 'linear',
                 cmap=None, norm=None, vmin=None, vmax=None,
                 alpha=None, linewidths=None, edgecolors='none'
                 reduce_C_function = np.mean, mincnt=None, marginals=True
                 **kwargs)
        Make a hexagonal binning plot of *x* versus *y*, where *x*,
        *y* are 1-D sequences of the same length, *N*. If *C* is *None*
        (the default), this is a histogram of the number of occurrences
        of the observations at (x[i],y[i]).
        If *C* is specified, it specifies values at the coordinate
        (x[i],y[i]). These values are accumulated for each hexagonal
        bin and then reduced according to *reduce_C_function*, which
        defaults to numpy's mean function (np.mean). (If *C* is
        specified, it must also be a 1-D sequence of the same length
        as *x* and *y*.)
        *x*, *y* and/or *C* may be masked arrays, in which case only
        unmasked points will be plotted.
        Optional keyword arguments:
        *gridsize*: [ 100 | integer ]
           The number of hexagons in the *x*-direction, default is
           100. The corresponding number of hexagons in the
           *y*-direction is chosen such that the hexagons are
           approximately regular. Alternatively, gridsize can be a
           tuple with two elements specifying the number of hexagons
           in the *x*-direction and the *y*-direction.
        *bins*: [ *None* | 'log' | integer | sequence ]
           If *None*, no binning is applied; the color of each hexagon
           directly corresponds to its count value.
           If 'log', use a logarithmic scale for the color
           map. Internally, :math:`log_{10}(i+1)` is used to
           determine the hexagon color.
           If an integer, divide the counts in the specified number
           of bins, and color the hexagons accordingly.
           If a sequence of values, the values of the lower bound of
           the bins to be used.
        *xscale*: [ 'linear' | 'log' ]
           Use a linear or log10 scale on the horizontal axis.
        *scale*: [ 'linear' | 'log' ]
           Use a linear or log10 scale on the vertical axis.
        *mincnt*: [ *None* | a positive integer ]
           If not *None*, only display cells with more than *mincnt*
           number of points in the cell
        *marginals*: [ *True* | *False* ]
           if marginals is *True*, plot the marginal density as
           colormapped rectangles along the bottom of the x-axis and
           left of the y-axis
        *extent*: [ *None* | scalars (left, right, bottom, top) ]
           The limits of the bins. The default assigns the limits
           based on gridsize, x, y, xscale and yscale.
        Other keyword arguments controlling color mapping and normalization
        arguments:
        *cmap*: [ *None* | Colormap ]
           a :class:`matplotlib.colors.Colormap` instance. If *None*,
           defaults to rc ``image.cmap``.
        *norm*: [ *None* | Normalize ]
           :class:`matplotlib.colors.Normalize` instance is used to
           scale luminance data to 0,1.
        *vmin* / *vmax*: scalar
           *vmin* and *vmax* are used in conjunction with *norm* to normalize
           luminance data.  If either are *None*, the min and max of the color
           array *C* is used.  Note if you pass a norm instance, your settings
           for *vmin* and *vmax* will be ignored.
        *alpha*: scalar between 0 and 1, or *None*
           the alpha value for the patches
        *linewidths*: [ *None* | scalar ]
           If *None*, defaults to rc lines.linewidth. Note that this
           is a tuple, and if you set the linewidths argument you
           must set it as a sequence of floats, as required by
           :class:`~matplotlib.collections.RegularPolyCollection`.
        Other keyword arguments controlling the Collection properties:
        *edgecolors*: [ *None* | ``'none'`` | mpl color | color sequence ]
           If ``'none'``, draws the edges in the same color as the fill color.
           This is the default, as it avoids unsightly unpainted pixels
           between the hexagons.
           If *None*, draws the outlines in the default color.
           If a matplotlib color arg or sequence of rgba tuples, draws the
           outlines in the specified color.
        Here are the standard descriptions of all the
        :class:`~matplotlib.collections.Collection` kwargs:
        %(Collection)s
        The return value is a
        :class:`~matplotlib.collections.PolyCollection` instance; use
        :meth:`~matplotlib.collections.PolyCollection.get_array` on
        this :class:`~matplotlib.collections.PolyCollection` to get
        the counts in each hexagon. If *marginals* is *True*, horizontal
        bar and vertical bar (both PolyCollections) will be attached
        to the return collection as attributes *hbar* and *vbar*.
        **Example:**
        .. plot:: mpl_examples/pylab_examples/hexbin_demo.py
        """
        # If hold is off, start from a cleared axes.
        if not self._hold:
            self.cla()

        self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)

        # Combine masks and drop masked points from all three arrays.
        x, y, C = cbook.delete_masked_points(x, y, C)

        # Set the size of the hexagon grid
        if iterable(gridsize):
            nx, ny = gridsize
        else:
            nx = gridsize
            # ny chosen so the hexagons come out approximately regular
            ny = int(nx / math.sqrt(3))
        # Count the number of data in each hexagon
        x = np.array(x, float)
        y = np.array(y, float)
        # For log scales the binning is done in log10 space; the data
        # must therefore be strictly positive.
        if xscale == 'log':
            if np.any(x <= 0.0):
                raise ValueError("x contains non-positive values, so can not"
                                 " be log-scaled")
            x = np.log10(x)
        if yscale == 'log':
            if np.any(y <= 0.0):
                raise ValueError("y contains non-positive values, so can not"
                                 " be log-scaled")
            y = np.log10(y)
        if extent is not None:
            xmin, xmax, ymin, ymax = extent
        else:
            xmin = np.amin(x)
            xmax = np.amax(x)
            ymin = np.amin(y)
            ymax = np.amax(y)
            # to avoid issues with singular data, expand the min/max pairs
            xmin, xmax = mtrans.nonsingular(xmin, xmax, expander=0.1)
            ymin, ymax = mtrans.nonsingular(ymin, ymax, expander=0.1)

        # In the x-direction, the hexagons exactly cover the region from
        # xmin to xmax. Need some padding to avoid roundoff errors.
        padding = 1.e-9 * (xmax - xmin)
        xmin -= padding
        xmax += padding
        sx = (xmax - xmin) / nx
        sy = (ymax - ymin) / ny

        # Keep the original (unnormalized) data; the marginal bars at
        # the end of this method are computed from these copies.
        if marginals:
            xorig = x.copy()
            yorig = y.copy()

        # Normalize the data to grid units.  Two interleaved lattices
        # are used: lattice 1 at integer positions (via round) and
        # lattice 2 offset by half a cell (via floor); together they
        # tile the plane with hexagons.
        x = (x - xmin) / sx
        y = (y - ymin) / sy
        ix1 = np.round(x).astype(int)
        iy1 = np.round(y).astype(int)
        ix2 = np.floor(x).astype(int)
        iy2 = np.floor(y).astype(int)

        nx1 = nx + 1
        ny1 = ny + 1
        nx2 = nx
        ny2 = ny
        n = nx1 * ny1 + nx2 * ny2

        # For each point, pick the lattice whose nearest center is
        # closer (the factor 3.0 accounts for the hexagon aspect ratio).
        d1 = (x - ix1) ** 2 + 3.0 * (y - iy1) ** 2
        d2 = (x - ix2 - 0.5) ** 2 + 3.0 * (y - iy2 - 0.5) ** 2
        bdist = (d1 < d2)
        if C is None:
            # Plain histogram mode: count points per hexagon.
            accum = np.zeros(n)
            # Create appropriate views into "accum" array.
            lattice1 = accum[:nx1 * ny1]
            lattice2 = accum[nx1 * ny1:]
            lattice1.shape = (nx1, ny1)
            lattice2.shape = (nx2, ny2)

            for i in xrange(len(x)):
                if bdist[i]:
                    if ((ix1[i] >= 0) and (ix1[i] < nx1) and
                            (iy1[i] >= 0) and (iy1[i] < ny1)):
                        lattice1[ix1[i], iy1[i]] += 1
                else:
                    if ((ix2[i] >= 0) and (ix2[i] < nx2) and
                            (iy2[i] >= 0) and (iy2[i] < ny2)):
                        lattice2[ix2[i], iy2[i]] += 1

            # threshold: cells below mincnt are NaN'ed out and later
            # dropped via good_idxs
            if mincnt is not None:
                for i in xrange(nx1):
                    for j in xrange(ny1):
                        if lattice1[i, j] < mincnt:
                            lattice1[i, j] = np.nan
                for i in xrange(nx2):
                    for j in xrange(ny2):
                        if lattice2[i, j] < mincnt:
                            lattice2[i, j] = np.nan
            accum = np.hstack((lattice1.astype(float).ravel(),
                               lattice2.astype(float).ravel()))
            good_idxs = ~np.isnan(accum)
        else:
            # Aggregation mode: gather C values per hexagon, then apply
            # reduce_C_function to each non-empty cell.
            if mincnt is None:
                mincnt = 0
            # create accumulation arrays
            lattice1 = np.empty((nx1, ny1), dtype=object)
            for i in xrange(nx1):
                for j in xrange(ny1):
                    lattice1[i, j] = []
            lattice2 = np.empty((nx2, ny2), dtype=object)
            for i in xrange(nx2):
                for j in xrange(ny2):
                    lattice2[i, j] = []

            for i in xrange(len(x)):
                if bdist[i]:
                    if ((ix1[i] >= 0) and (ix1[i] < nx1) and
                            (iy1[i] >= 0) and (iy1[i] < ny1)):
                        lattice1[ix1[i], iy1[i]].append(C[i])
                else:
                    if ((ix2[i] >= 0) and (ix2[i] < nx2) and
                            (iy2[i] >= 0) and (iy2[i] < ny2)):
                        lattice2[ix2[i], iy2[i]].append(C[i])

            for i in xrange(nx1):
                for j in xrange(ny1):
                    vals = lattice1[i, j]
                    if len(vals) > mincnt:
                        lattice1[i, j] = reduce_C_function(vals)
                    else:
                        lattice1[i, j] = np.nan
            for i in xrange(nx2):
                for j in xrange(ny2):
                    vals = lattice2[i, j]
                    if len(vals) > mincnt:
                        lattice2[i, j] = reduce_C_function(vals)
                    else:
                        lattice2[i, j] = np.nan

            accum = np.hstack((lattice1.astype(float).ravel(),
                               lattice2.astype(float).ravel()))
            good_idxs = ~np.isnan(accum)

        # Hexagon center coordinates, in data units: lattice 2 is
        # offset by half a cell in both directions.
        offsets = np.zeros((n, 2), float)
        offsets[:nx1 * ny1, 0] = np.repeat(np.arange(nx1), ny1)
        offsets[:nx1 * ny1, 1] = np.tile(np.arange(ny1), nx1)
        offsets[nx1 * ny1:, 0] = np.repeat(np.arange(nx2) + 0.5, ny2)
        offsets[nx1 * ny1:, 1] = np.tile(np.arange(ny2), nx2) + 0.5
        offsets[:, 0] *= sx
        offsets[:, 1] *= sy
        offsets[:, 0] += xmin
        offsets[:, 1] += ymin
        # remove accumulation bins with no data
        offsets = offsets[good_idxs, :]
        accum = accum[good_idxs]

        # Unit hexagon outline, scaled by the cell size.
        polygon = np.zeros((6, 2), float)
        polygon[:, 0] = sx * np.array([0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
        polygon[:, 1] = sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0

        if edgecolors == 'none':
            edgecolors = 'face'

        if xscale == 'log' or yscale == 'log':
            # With log scaling, each hexagon must be transformed back
            # to data space individually, so build explicit polygons
            # instead of using one polygon plus offsets.
            polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)
            if xscale == 'log':
                polygons[:, :, 0] = 10.0 ** polygons[:, :, 0]
                xmin = 10.0 ** xmin
                xmax = 10.0 ** xmax
                self.set_xscale(xscale)
            if yscale == 'log':
                polygons[:, :, 1] = 10.0 ** polygons[:, :, 1]
                ymin = 10.0 ** ymin
                ymax = 10.0 ** ymax
                self.set_yscale(yscale)
            collection = mcoll.PolyCollection(
                polygons,
                edgecolors=edgecolors,
                linewidths=linewidths,
                )
        else:
            collection = mcoll.PolyCollection(
                [polygon],
                edgecolors=edgecolors,
                linewidths=linewidths,
                offsets=offsets,
                transOffset=mtransforms.IdentityTransform(),
                offset_position="data"
                )

        if isinstance(norm, mcolors.LogNorm):
            if (accum == 0).any():
                # make sure we have no zeros (LogNorm cannot handle 0)
                accum += 1

        # autoscale the norm with current accum values if it hasn't
        # been set
        if norm is not None:
            if norm.vmin is None and norm.vmax is None:
                norm.autoscale(accum)

        # Transform accum if needed
        if bins == 'log':
            accum = np.log10(accum + 1)
        elif bins is not None:
            if not iterable(bins):
                minimum, maximum = min(accum), max(accum)
                bins -= 1  # one less edge than bins
                bins = minimum + (maximum - minimum) * np.arange(bins) / bins
            bins = np.sort(bins)
            accum = bins.searchsorted(accum)

        if norm is not None:
            assert(isinstance(norm, mcolors.Normalize))
        collection.set_array(accum)
        collection.set_cmap(cmap)
        collection.set_norm(norm)
        collection.set_alpha(alpha)
        collection.update(kwargs)

        if vmin is not None or vmax is not None:
            collection.set_clim(vmin, vmax)
        else:
            collection.autoscale_None()

        corners = ((xmin, ymin), (xmax, ymax))
        self.update_datalim(corners)
        self.autoscale_view(tight=True)

        # add the collection last
        self.add_collection(collection, autolim=False)
        if not marginals:
            return collection

        # ---- marginal density bars along the two axes ----
        if C is None:
            C = np.ones(len(x))

        def coarse_bin(x, y, coarse):
            # reduce y values per coarse x-bin with reduce_C_function
            ind = coarse.searchsorted(x).clip(0, len(coarse) - 1)
            mus = np.zeros(len(coarse))
            for i in range(len(coarse)):
                mu = reduce_C_function(y[ind == i])
                mus[i] = mu
            return mus

        coarse = np.linspace(xmin, xmax, gridsize)

        xcoarse = coarse_bin(xorig, C, coarse)
        valid = ~np.isnan(xcoarse)
        verts, values = [], []
        for i, val in enumerate(xcoarse):
            thismin = coarse[i]
            if i < len(coarse) - 1:
                thismax = coarse[i + 1]
            else:
                # last bin: extend by the previous bin width
                thismax = thismin + np.diff(coarse)[-1]

            if not valid[i]:
                continue

            # thin rectangle hugging the bottom of the axes (heights in
            # axes-fraction units via the grid transform below)
            verts.append([(thismin, 0),
                          (thismin, 0.05),
                          (thismax, 0.05),
                          (thismax, 0)])
            values.append(val)

        values = np.array(values)
        trans = self.get_xaxis_transform(which='grid')

        hbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')

        hbar.set_array(values)
        hbar.set_cmap(cmap)
        hbar.set_norm(norm)
        hbar.set_alpha(alpha)
        hbar.update(kwargs)
        self.add_collection(hbar, autolim=False)

        # same construction for the vertical bar on the left y-axis
        coarse = np.linspace(ymin, ymax, gridsize)
        ycoarse = coarse_bin(yorig, C, coarse)
        valid = ~np.isnan(ycoarse)
        verts, values = [], []
        for i, val in enumerate(ycoarse):
            thismin = coarse[i]
            if i < len(coarse) - 1:
                thismax = coarse[i + 1]
            else:
                thismax = thismin + np.diff(coarse)[-1]
            if not valid[i]:
                continue
            verts.append([(0, thismin), (0.0, thismax),
                          (0.05, thismax), (0.05, thismin)])
            values.append(val)

        values = np.array(values)

        trans = self.get_yaxis_transform(which='grid')

        vbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
        vbar.set_array(values)
        vbar.set_cmap(cmap)
        vbar.set_norm(norm)
        vbar.set_alpha(alpha)
        vbar.update(kwargs)
        self.add_collection(vbar, autolim=False)

        collection.hbar = hbar
        collection.vbar = vbar

        def on_changed(collection):
            # keep the marginal bars' colormap and limits in sync with
            # the main hexbin collection
            hbar.set_cmap(collection.get_cmap())
            hbar.set_clim(collection.get_clim())
            vbar.set_cmap(collection.get_cmap())
            vbar.set_clim(collection.get_clim())

        collection.callbacksSM.connect('changed', on_changed)

        return collection
@docstring.dedent_interpd
def arrow(self, x, y, dx, dy, **kwargs):
    """
    Add an arrow to the axes.

    Call signature::

        arrow(x, y, dx, dy, **kwargs)

    Draws an arrow on this axes from (*x*, *y*) to (*x* + *dx*,
    *y* + *dy*), using a :class:`~matplotlib.patches.FancyArrow`
    patch to construct it.

    The resulting arrow is affected by the axes aspect ratio and
    limits, which may produce an arrow whose head is not square with
    its stem.  To create an arrow whose head is square with its stem,
    use :meth:`annotate` instead, for example::

        ax.annotate("", xy=(0.5, 0.5), xytext=(0, 0),
            arrowprops=dict(arrowstyle="->"))

    Optional kwargs control the arrow construction and properties:

    %(FancyArrow)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/arrow_demo.py
    """
    # Patch-like code generally does not understand unit objects, so
    # strip units from all four coordinates before building the patch.
    tail_x = self.convert_xunits(x)
    tail_y = self.convert_yunits(y)
    delta_x = self.convert_xunits(dx)
    delta_y = self.convert_yunits(dy)

    arrow_patch = mpatches.FancyArrow(tail_x, tail_y, delta_x, delta_y,
                                      **kwargs)
    self.add_artist(arrow_patch)
    return arrow_patch
def quiverkey(self, *args, **kw):
    # Thin wrapper: build a QuiverKey from the given arguments and
    # attach it to this axes as a plain artist.  All argument parsing
    # is delegated to mquiver.QuiverKey.
    qk = mquiver.QuiverKey(*args, **kw)
    self.add_artist(qk)
    return qk
# Reuse the QuiverKey documentation so the wrapper stays in sync
# with the underlying implementation.
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
    # Respect the axes "hold" state: clear the axes before drawing
    # unless the caller asked for an overlay.
    if not self._hold:
        self.cla()
    # All argument parsing is delegated to the Quiver collection.
    arrows = mquiver.Quiver(self, *args, **kw)
    self.add_collection(arrows, autolim=True)
    self.autoscale_view()
    return arrows
# Expose the Quiver documentation as this wrapper's docstring.
quiver.__doc__ = mquiver.Quiver.quiver_doc
def stackplot(self, x, *args, **kwargs):
    # Pure delegate: the stackplot module draws directly onto this
    # axes and returns the created artists.
    return mstack.stackplot(self, x, *args, **kwargs)
# Reuse the implementation's documentation for the wrapper.
stackplot.__doc__ = mstack.stackplot.__doc__
def streamplot(self, x, y, u, v, density=1, linewidth=None, color=None,
               cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
               minlength=0.1, transform=None, zorder=1):
    # Respect the axes "hold" state before drawing.
    if not self._hold:
        self.cla()
    # Delegate to the streamplot module, which draws the streamlines
    # directly onto this axes and returns a container holding the
    # resulting line and arrow artists.
    stream_container = mstream.streamplot(self, x, y, u, v,
                                          density=density,
                                          linewidth=linewidth,
                                          color=color,
                                          cmap=cmap,
                                          norm=norm,
                                          arrowsize=arrowsize,
                                          arrowstyle=arrowstyle,
                                          minlength=minlength,
                                          transform=transform,
                                          zorder=zorder)
    return stream_container
# Reuse the implementation's documentation for the wrapper.
streamplot.__doc__ = mstream.streamplot.__doc__
@docstring.dedent_interpd
def barbs(self, *args, **kw):
    """
    %(barbs_doc)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/barb_demo.py
    """
    # Mirror quiver(): honour the hold state, then delegate all
    # argument parsing to the Barbs collection.
    if not self._hold:
        self.cla()
    barb_collection = mquiver.Barbs(self, *args, **kw)
    self.add_collection(barb_collection, autolim=True)
    self.autoscale_view()
    return barb_collection
@docstring.dedent_interpd
def fill(self, *args, **kwargs):
    """
    Plot filled polygons.

    Call signature::

        fill(*args, **kwargs)

    *args* is a variable length argument, allowing for multiple
    *x*, *y* pairs with an optional color format string; see
    :func:`~matplotlib.pyplot.plot` for details on the argument
    parsing.  For example, to plot a polygon with vertices at *x*,
    *y* in blue.::

        ax.fill(x,y, 'b' )

    An arbitrary number of *x*, *y*, *color* groups can be specified::

        ax.fill(x1, y1, 'g', x2, y2, 'r')

    Return value is a list of :class:`~matplotlib.patches.Patch`
    instances that were added.

    The same color strings that :func:`~matplotlib.pyplot.plot`
    supports are supported by the fill format string.

    If you would like to fill below a curve, e.g., shade a region
    between 0 and *y* along *x*, use :meth:`fill_between`

    The *closed* kwarg will close the polygon when *True* (default).

    kwargs control the :class:`~matplotlib.patches.Polygon` properties:

    %(Polygon)s

    **Example:**

    .. plot:: mpl_examples/lines_bars_and_markers/fill_demo.py
    """
    # Respect the axes "hold" state before drawing.
    if not self._hold:
        self.cla()

    # Argument parsing is shared with plot(); each parsed group
    # yields one Polygon patch, which is attached to the axes and
    # collected for the return value.
    added_patches = []
    for polygon in self._get_patches_for_fill(*args, **kwargs):
        self.add_patch(polygon)
        added_patches.append(polygon)
    self.autoscale_view()
    return added_patches
@docstring.dedent_interpd
def fill_between(self, x, y1, y2=0, where=None, interpolate=False,
                 **kwargs):
    """
    Make filled polygons between two curves.

    Call signature::

        fill_between(x, y1, y2=0, where=None, **kwargs)

    Create a :class:`~matplotlib.collections.PolyCollection`
    filling the regions between *y1* and *y2* where
    ``where==True``

    *x* :
        An N-length array of the x data

    *y1* :
        An N-length array (or scalar) of the y data

    *y2* :
        An N-length array (or scalar) of the y data

    *where* :
        If *None*, default to fill between everywhere.  If not *None*,
        it is an N-length numpy boolean array and the fill will
        only happen over the regions where ``where==True``.

    *interpolate* :
        If *True*, interpolate between the two lines to find the
        precise point of intersection.  Otherwise, the start and
        end points of the filled region will only occur on explicit
        values in the *x* array.

    *kwargs* :
        Keyword args passed on to the
        :class:`~matplotlib.collections.PolyCollection`.

    kwargs control the :class:`~matplotlib.patches.Polygon` properties:

    %(PolyCollection)s

    .. plot:: mpl_examples/pylab_examples/fill_between_demo.py

    .. seealso::

        :meth:`fill_betweenx`
            for filling between two sets of x-values

    """
    # Handle united data, such as dates
    self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
    self._process_unit_info(ydata=y2)

    # Convert the arrays so we can work with them; NaN/inf samples
    # become masked entries and are excluded from the fill below.
    x = ma.masked_invalid(self.convert_xunits(x))
    y1 = ma.masked_invalid(self.convert_yunits(y1))
    y2 = ma.masked_invalid(self.convert_yunits(y2))

    # Broadcast scalar y1/y2 to the length of x so the shape check
    # below can treat all inputs uniformly.
    if y1.ndim == 0:
        y1 = np.ones_like(x) * y1
    if y2.ndim == 0:
        y2 = np.ones_like(x) * y2

    if where is None:
        where = np.ones(len(x), np.bool)
    else:
        where = np.asarray(where, np.bool)

    if not (x.shape == y1.shape == y2.shape == where.shape):
        raise ValueError("Argument dimensions are incompatible")

    # Drop points where any of x, y1, y2 is masked (invalid).
    mask = reduce(ma.mask_or, [ma.getmask(a) for a in (x, y1, y2)])
    if mask is not ma.nomask:
        where &= ~mask

    polys = []
    # Each contiguous True run in *where* becomes one closed polygon.
    for ind0, ind1 in mlab.contiguous_regions(where):
        xslice = x[ind0:ind1]
        y1slice = y1[ind0:ind1]
        y2slice = y2[ind0:ind1]

        if not len(xslice):
            continue

        N = len(xslice)
        # Polygon layout: a start point, the N samples along y1, an
        # end point, then the N samples along y2 in reverse, so the
        # outline encloses the filled region.
        X = np.zeros((2 * N + 2, 2), np.float)
        if interpolate:
            def get_interp_point(ind):
                # Linearly interpolate the crossing point of y1 and
                # y2 between sample ind-1 and sample ind.
                im1 = max(ind - 1, 0)
                x_values = x[im1:ind + 1]
                diff_values = y1[im1:ind + 1] - y2[im1:ind + 1]
                y1_values = y1[im1:ind + 1]

                if len(diff_values) == 2:
                    if np.ma.is_masked(diff_values[1]):
                        return x[im1], y1[im1]
                    elif np.ma.is_masked(diff_values[0]):
                        return x[ind], y1[ind]

                diff_order = diff_values.argsort()
                diff_root_x = np.interp(
                    0, diff_values[diff_order], x_values[diff_order])
                diff_root_y = np.interp(diff_root_x, x_values, y1_values)
                return diff_root_x, diff_root_y

            start = get_interp_point(ind0)
            end = get_interp_point(ind1)
        else:
            # the purpose of the next two lines is for when y2 is a
            # scalar like 0 and we want the fill to go all the way
            # down to 0 even if none of the y1 sample points do
            start = xslice[0], y2slice[0]
            end = xslice[-1], y2slice[-1]

        X[0] = start
        X[N + 1] = end

        X[1:N + 1, 0] = xslice
        X[1:N + 1, 1] = y1slice
        X[N + 2:, 0] = xslice[::-1]
        X[N + 2:, 1] = y2slice[::-1]

        polys.append(X)

    collection = mcoll.PolyCollection(polys, **kwargs)

    # now update the datalim and autoscale
    XY1 = np.array([x[where], y1[where]]).T
    XY2 = np.array([x[where], y2[where]]).T
    self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
                                     updatex=True, updatey=True)
    self.ignore_existing_data_limits = False
    self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
                                     updatex=False, updatey=True)
    self.add_collection(collection, autolim=False)
    self.autoscale_view()
    return collection
@docstring.dedent_interpd
def fill_betweenx(self, y, x1, x2=0, where=None, **kwargs):
    """
    Make filled polygons between two horizontal curves.

    Call signature::

        fill_betweenx(y, x1, x2=0, where=None, **kwargs)

    Create a :class:`~matplotlib.collections.PolyCollection`
    filling the regions between *x1* and *x2* where
    ``where==True``

    *y* :
        An N-length array of the y data

    *x1* :
        An N-length array (or scalar) of the x data

    *x2* :
        An N-length array (or scalar) of the x data

    *where* :
        If *None*, default to fill between everywhere.  If not *None*,
        it is a N length numpy boolean array and the fill will
        only happen over the regions where ``where==True``

    *kwargs* :
        keyword args passed on to the
        :class:`~matplotlib.collections.PolyCollection`

    kwargs control the :class:`~matplotlib.patches.Polygon` properties:

    %(PolyCollection)s

    .. plot:: mpl_examples/pylab_examples/fill_betweenx_demo.py

    .. seealso::

        :meth:`fill_between`
            for filling between two sets of y-values

    """
    # Handle united data, such as dates
    self._process_unit_info(ydata=y, xdata=x1, kwargs=kwargs)
    self._process_unit_info(xdata=x2)

    # Convert the arrays so we can work with them; NaN/inf samples
    # become masked entries and are excluded from the fill below.
    y = ma.masked_invalid(self.convert_yunits(y))
    x1 = ma.masked_invalid(self.convert_xunits(x1))
    x2 = ma.masked_invalid(self.convert_xunits(x2))

    # Broadcast scalar x1/x2 to the length of y so the shape check
    # below can treat all inputs uniformly.
    if x1.ndim == 0:
        x1 = np.ones_like(y) * x1
    if x2.ndim == 0:
        x2 = np.ones_like(y) * x2

    if where is None:
        where = np.ones(len(y), np.bool)
    else:
        where = np.asarray(where, np.bool)

    if not (y.shape == x1.shape == x2.shape == where.shape):
        raise ValueError("Argument dimensions are incompatible")

    # Drop points where any of y, x1, x2 is masked (invalid).
    mask = reduce(ma.mask_or, [ma.getmask(a) for a in (y, x1, x2)])
    if mask is not ma.nomask:
        where &= ~mask

    polys = []
    # Each contiguous True run in *where* becomes one closed polygon.
    for ind0, ind1 in mlab.contiguous_regions(where):
        yslice = y[ind0:ind1]
        x1slice = x1[ind0:ind1]
        x2slice = x2[ind0:ind1]

        if not len(yslice):
            continue

        N = len(yslice)
        # Polygon layout: a start point, the N samples along x1, an
        # end point, then the N samples along x2 in reverse, so the
        # outline encloses the filled region.
        Y = np.zeros((2 * N + 2, 2), np.float)

        # the purpose of the next two lines is for when x2 is a
        # scalar like 0 and we want the fill to go all the way
        # down to 0 even if none of the x1 sample points do
        Y[0] = x2slice[0], yslice[0]
        Y[N + 1] = x2slice[-1], yslice[-1]

        Y[1:N + 1, 0] = x1slice
        Y[1:N + 1, 1] = yslice
        Y[N + 2:, 0] = x2slice[::-1]
        Y[N + 2:, 1] = yslice[::-1]

        polys.append(Y)

    collection = mcoll.PolyCollection(polys, **kwargs)

    # now update the datalim and autoscale
    X1Y = np.array([x1[where], y[where]]).T
    X2Y = np.array([x2[where], y[where]]).T
    self.dataLim.update_from_data_xy(X1Y, self.ignore_existing_data_limits,
                                     updatex=True, updatey=True)
    self.ignore_existing_data_limits = False
    self.dataLim.update_from_data_xy(X2Y, self.ignore_existing_data_limits,
                                     updatex=True, updatey=False)
    self.add_collection(collection, autolim=False)
    self.autoscale_view()
    return collection
#### plotting z(x,y): imshow, pcolor and relatives, contour
@docstring.dedent_interpd
def imshow(self, X, cmap=None, norm=None, aspect=None,
           interpolation=None, alpha=None, vmin=None, vmax=None,
           origin=None, extent=None, shape=None, filternorm=1,
           filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
    """
    Display an image on the axes.

    Parameters
    ----------
    X : array_like, shape (n, m) or (n, m, 3) or (n, m, 4)
        Display the image in `X` to current axes.  `X` may be a float
        array, a uint8 array or a PIL image. If `X` is an array, it
        can have the following shapes:

        - MxN -- luminance (grayscale, float array only)
        - MxNx3 -- RGB (float or uint8 array)
        - MxNx4 -- RGBA (float or uint8 array)

        The value for each component of MxNx3 and MxNx4 float arrays
        should be in the range 0.0 to 1.0; MxN float arrays may be
        normalised.

    cmap : `~matplotlib.colors.Colormap`, optional, default: None
        If None, default to rc `image.cmap` value. `cmap` is ignored when
        `X` has RGB(A) information

    aspect : ['auto' | 'equal' | scalar], optional, default: None
        If 'auto', changes the image aspect ratio to match that of the
        axes.

        If 'equal', and `extent` is None, changes the axes aspect ratio to
        match that of the image. If `extent` is not `None`, the axes
        aspect ratio is changed to match that of the extent.

        If None, default to rc ``image.aspect`` value.

    interpolation : string, optional, default: None
        Acceptable values are 'none', 'nearest', 'bilinear', 'bicubic',
        'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser',
        'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc',
        'lanczos'

        If `interpolation` is None, default to rc `image.interpolation`.
        See also the `filternorm` and `filterrad` parameters.

        If `interpolation` is 'none', then no interpolation is performed
        on the Agg, ps and pdf backends. Other backends will fall back to
        'nearest'.

    norm : `~matplotlib.colors.Normalize`, optional, default: None
        A `~matplotlib.colors.Normalize` instance is used to scale
        luminance data to 0, 1. If `None`, use the default
        func:`normalize`. `norm` is only used if `X` is an array of
        floats.

    vmin, vmax : scalar, optional, default: None
        `vmin` and `vmax` are used in conjunction with norm to normalize
        luminance data.  Note if you pass a `norm` instance, your
        settings for `vmin` and `vmax` will be ignored.

    alpha : scalar, optional, default: None
        The alpha blending value, between 0 (transparent) and 1 (opaque)

    origin : ['upper' | 'lower'], optional, default: None
        Place the [0,0] index of the array in the upper left or lower left
        corner of the axes. If None, default to rc `image.origin`.

    extent : scalars (left, right, bottom, top), optional, default: None
        The location, in data-coordinates, of the lower-left and
        upper-right corners. If `None`, the image is positioned such that
        the pixel centers fall on zero-based (row, column) indices.

    shape : scalars (columns, rows), optional, default: None
        For raw buffer images

    filternorm : scalar, optional, default: 1
        A parameter for the antigrain image resize filter.  From the
        antigrain documentation, if `filternorm` = 1, the filter
        normalizes integer values and corrects the rounding errors. It
        doesn't do anything with the source floating point values, it
        corrects only integers according to the rule of 1.0 which means
        that any sum of pixel weights must be equal to 1.0.  So, the
        filter function must produce a graph of the proper shape.

    filterrad : scalar, optional, default: 4.0
        The filter radius for filters that have a radius parameter, i.e.
        when interpolation is one of: 'sinc', 'lanczos' or 'blackman'

    Returns
    -------
    image : `~matplotlib.image.AxesImage`

    Other parameters
    ----------------
    kwargs : `~matplotlib.artist.Artist` properties.

    See also
    --------
    matshow : Plot a matrix or an array as an image.

    Examples
    --------
    .. plot:: mpl_examples/pylab_examples/image_demo.py
    """
    # Respect the axes "hold" state before drawing.
    if not self._hold:
        self.cla()

    if norm is not None:
        assert(isinstance(norm, mcolors.Normalize))
    if aspect is None:
        aspect = rcParams['image.aspect']
    self.set_aspect(aspect)
    im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
                          filternorm=filternorm,
                          filterrad=filterrad, resample=resample, **kwargs)

    im.set_data(X)
    im.set_alpha(alpha)
    if im.get_clip_path() is None:
        # image does not already have clipping set, clip to axes patch
        im.set_clip_path(self.patch)
    #if norm is None and shape is None:
    #   im.set_clim(vmin, vmax)
    # Explicit vmin/vmax take precedence; otherwise autoscale any
    # unset limits from the data.
    if vmin is not None or vmax is not None:
        im.set_clim(vmin, vmax)
    else:
        im.autoscale_None()
    im.set_url(url)

    # update ax.dataLim, and, if autoscaling, set viewLim
    # to tightly fit the image, regardless of dataLim.
    im.set_extent(im.get_extent())

    self.add_image(im)
    return im
@staticmethod
def _pcolorargs(funcname, *args, **kw):
    # Normalize the (C,) or (X, Y, C) positional arguments of
    # pcolor/pcolormesh into full 2-D X, Y grids plus the color
    # array C, raising TypeError (with *funcname* in the message)
    # on any invalid combination.
    #
    # This takes one kwarg, allmatch.
    # If allmatch is True, then the incoming X, Y, C must
    # have matching dimensions, taking into account that
    # X and Y can be 1-D rather than 2-D.  This perfect
    # match is required for Gouraud shading.  For flat
    # shading, X and Y specify boundaries, so we need
    # one more boundary than color in each direction.
    # For convenience, and consistent with Matlab, we
    # discard the last row and/or column of C if necessary
    # to meet this condition.  This is done if allmatch
    # is False.

    allmatch = kw.pop("allmatch", False)

    if len(args) == 1:
        # Only C was given: synthesize integer index grids for X, Y.
        C = args[0]
        numRows, numCols = C.shape
        if allmatch:
            X, Y = np.meshgrid(np.arange(numCols), np.arange(numRows))
        else:
            X, Y = np.meshgrid(np.arange(numCols + 1),
                               np.arange(numRows + 1))
        return X, Y, C

    if len(args) == 3:
        X, Y, C = args
        numRows, numCols = C.shape
    else:
        raise TypeError(
            'Illegal arguments to %s; see help(%s)' % (funcname, funcname))

    Nx = X.shape[-1]
    Ny = Y.shape[0]
    # Expand 1-D (or single-row/column) X and Y into full 2-D grids
    # by repeating the row/column.
    if len(X.shape) != 2 or X.shape[0] == 1:
        x = X.reshape(1, Nx)
        X = x.repeat(Ny, axis=0)
    if len(Y.shape) != 2 or Y.shape[1] == 1:
        y = Y.reshape(Ny, 1)
        Y = y.repeat(Nx, axis=1)
    if X.shape != Y.shape:
        raise TypeError(
            'Incompatible X, Y inputs to %s; see help(%s)' % (
                funcname, funcname))
    if allmatch:
        # Gouraud shading: X, Y, C must all have identical dimensions.
        if not (Nx == numCols and Ny == numRows):
            raise TypeError('Dimensions of C %s are incompatible with'
                            ' X (%d) and/or Y (%d); see help(%s)' % (
                                C.shape, Nx, Ny, funcname))
    else:
        # Flat shading: boundaries exceed colors by one in each
        # direction; any extra trailing row/column of C is discarded
        # (MATLAB convention).
        if not (numCols in (Nx, Nx - 1) and numRows in (Ny, Ny - 1)):
            raise TypeError('Dimensions of C %s are incompatible with'
                            ' X (%d) and/or Y (%d); see help(%s)' % (
                                C.shape, Nx, Ny, funcname))
        C = C[:Ny - 1, :Nx - 1]
    return X, Y, C
@docstring.dedent_interpd
def pcolor(self, *args, **kwargs):
    """
    Create a pseudocolor plot of a 2-D array.

    .. note::

        pcolor can be very slow for large arrays; consider
        using the similar but much faster
        :func:`~matplotlib.pyplot.pcolormesh` instead.

    Call signatures::

        pcolor(C, **kwargs)
        pcolor(X, Y, C, **kwargs)

    *C* is the array of color values.

    *X* and *Y*, if given, specify the (*x*, *y*) coordinates of
    the colored quadrilaterals; the quadrilateral for C[i,j] has
    corners at::

        (X[i,   j],   Y[i,   j]),
        (X[i,   j+1], Y[i,   j+1]),
        (X[i+1, j],   Y[i+1, j]),
        (X[i+1, j+1], Y[i+1, j+1]).

    Ideally the dimensions of *X* and *Y* should be one greater
    than those of *C*; if the dimensions are the same, then the
    last row and column of *C* will be ignored.

    Note that the column index corresponds to the
    *x*-coordinate, and the row index corresponds to *y*; for
    details, see the :ref:`Grid Orientation
    <axes-pcolor-grid-orientation>` section below.

    If either or both of *X* and *Y* are 1-D arrays or column vectors,
    they will be expanded as needed into the appropriate 2-D arrays,
    making a rectangular grid.

    *X*, *Y* and *C* may be masked arrays.  If either C[i, j], or one
    of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
    [i, j+1],[i+1, j+1]) is masked, nothing is plotted.

    Keyword arguments:

      *cmap*: [ *None* | Colormap ]
        A :class:`matplotlib.colors.Colormap` instance. If *None*, use
        rc settings.

      *norm*: [ *None* | Normalize ]
        An :class:`matplotlib.colors.Normalize` instance is used
        to scale luminance data to 0,1. If *None*, defaults to
        :func:`normalize`.

      *vmin*/*vmax*: [ *None* | scalar ]
        *vmin* and *vmax* are used in conjunction with *norm* to
        normalize luminance data.  If either is *None*, it
        is autoscaled to the respective min or max
        of the color array *C*.  If not *None*, *vmin* or
        *vmax* passed in here override any pre-existing values
        supplied in the *norm* instance.

      *shading*: [ 'flat' | 'faceted' ]
        If 'faceted', a black grid is drawn around each rectangle; if
        'flat', edges are not drawn. Default is 'flat', contrary to
        MATLAB.

        This kwarg is deprecated; please use 'edgecolors' instead:

          * shading='flat' -- edgecolors='none'
          * shading='faceted -- edgecolors='k'

      *edgecolors*: [ *None* | ``'none'`` | color | color sequence]
        If *None*, the rc setting is used by default.

        If ``'none'``, edges will not be visible.

        An mpl color or sequence of colors will set the edge color

      *alpha*: ``0 <= scalar <= 1`` or *None*
        the alpha blending value

      *snap*: bool
        Whether to snap the mesh to pixel boundaries.

    Return value is a :class:`matplotlib.collections.Collection`
    instance.

    .. _axes-pcolor-grid-orientation:

    The grid orientation follows the MATLAB convention: an
    array *C* with shape (*nrows*, *ncolumns*) is plotted with
    the column number as *X* and the row number as *Y*, increasing
    up; hence it is plotted the way the array would be printed,
    except that the *Y* axis is reversed.  That is, *C* is taken
    as *C*(*y*, *x*).

    Similarly for :func:`meshgrid`::

        x = np.arange(5)
        y = np.arange(3)
        X, Y = np.meshgrid(x, y)

    is equivalent to::

        X = array([[0, 1, 2, 3, 4],
                   [0, 1, 2, 3, 4],
                   [0, 1, 2, 3, 4]])

        Y = array([[0, 0, 0, 0, 0],
                   [1, 1, 1, 1, 1],
                   [2, 2, 2, 2, 2]])

    so if you have::

        C = rand(len(x), len(y))

    then you need to transpose C::

        pcolor(X, Y, C.T)

    or::

        pcolor(C.T)

    MATLAB :func:`pcolor` always discards the last row and column
    of *C*, but matplotlib displays the last row and column if *X* and
    *Y* are not specified, or if *X* and *Y* have one more row and
    column than *C*.

    kwargs can be used to control the
    :class:`~matplotlib.collections.PolyCollection` properties:

    %(PolyCollection)s

    .. note::

        The default *antialiaseds* is False if the default
        *edgecolors*="none" is used.  This eliminates artificial lines
        at patch boundaries, and works regardless of the value of
        alpha.  If *edgecolors* is not "none", then the default
        *antialiaseds* is taken from
        rcParams['patch.antialiased'], which defaults to *True*.
        Stroking the edges may be preferred if *alpha* is 1, but
        will cause artifacts otherwise.

    .. seealso::

        :func:`~matplotlib.pyplot.pcolormesh`
            For an explanation of the differences between
            pcolor and pcolormesh.
    """
    # Respect the axes "hold" state before drawing.
    if not self._hold:
        self.cla()

    alpha = kwargs.pop('alpha', None)
    norm = kwargs.pop('norm', None)
    cmap = kwargs.pop('cmap', None)
    vmin = kwargs.pop('vmin', None)
    vmax = kwargs.pop('vmax', None)
    if 'shading' in kwargs:
        cbook.warn_deprecated(
            '1.2', name='shading', alternative='edgecolors',
            obj_type='option')
    shading = kwargs.pop('shading', 'flat')

    X, Y, C = self._pcolorargs('pcolor', *args, allmatch=False)
    Ny, Nx = X.shape

    # unit conversion allows e.g. datetime objects as axis values
    self._process_unit_info(xdata=X, ydata=Y, kwargs=kwargs)
    X = self.convert_xunits(X)
    Y = self.convert_yunits(Y)

    # convert to MA, if necessary.
    C = ma.asarray(C)
    X = ma.asarray(X)
    Y = ma.asarray(Y)
    mask = ma.getmaskarray(X) + ma.getmaskarray(Y)
    # A quad is masked when any of its four corner vertices is masked.
    xymask = (mask[0:-1, 0:-1] + mask[1:, 1:] +
              mask[0:-1, 1:] + mask[1:, 0:-1])
    # don't plot if C or any of the surrounding vertices are masked.
    mask = ma.getmaskarray(C) + xymask

    newaxis = np.newaxis
    compress = np.compress

    ravelmask = (mask == 0).ravel()
    # Gather the four corner coordinates of every unmasked quad.
    X1 = compress(ravelmask, ma.filled(X[0:-1, 0:-1]).ravel())
    Y1 = compress(ravelmask, ma.filled(Y[0:-1, 0:-1]).ravel())
    X2 = compress(ravelmask, ma.filled(X[1:, 0:-1]).ravel())
    Y2 = compress(ravelmask, ma.filled(Y[1:, 0:-1]).ravel())
    X3 = compress(ravelmask, ma.filled(X[1:, 1:]).ravel())
    Y3 = compress(ravelmask, ma.filled(Y[1:, 1:]).ravel())
    X4 = compress(ravelmask, ma.filled(X[0:-1, 1:]).ravel())
    Y4 = compress(ravelmask, ma.filled(Y[0:-1, 1:]).ravel())
    npoly = len(X1)

    # Build a closed 5-vertex outline per quad (first corner repeated).
    xy = np.concatenate((X1[:, newaxis], Y1[:, newaxis],
                         X2[:, newaxis], Y2[:, newaxis],
                         X3[:, newaxis], Y3[:, newaxis],
                         X4[:, newaxis], Y4[:, newaxis],
                         X1[:, newaxis], Y1[:, newaxis]),
                        axis=1)
    verts = xy.reshape((npoly, 5, 2))

    C = compress(ravelmask, ma.filled(C[0:Ny - 1, 0:Nx - 1]).ravel())

    linewidths = (0.25,)
    if 'linewidth' in kwargs:
        kwargs['linewidths'] = kwargs.pop('linewidth')
    kwargs.setdefault('linewidths', linewidths)

    if shading == 'faceted':
        edgecolors = 'k',  # trailing comma: one-element color sequence
    else:
        edgecolors = 'none'
    if 'edgecolor' in kwargs:
        kwargs['edgecolors'] = kwargs.pop('edgecolor')
    ec = kwargs.setdefault('edgecolors', edgecolors)

    # aa setting will default via collections to patch.antialiased
    # unless the boundary is not stroked, in which case the
    # default will be False; with unstroked boundaries, aa
    # makes artifacts that are often disturbing.
    if 'antialiased' in kwargs:
        kwargs['antialiaseds'] = kwargs.pop('antialiased')
    if 'antialiaseds' not in kwargs and (is_string_like(ec) and
                                         ec.lower() == "none"):
        kwargs['antialiaseds'] = False

    kwargs.setdefault('snap', False)

    collection = mcoll.PolyCollection(verts, **kwargs)

    collection.set_alpha(alpha)
    collection.set_array(C)
    if norm is not None:
        assert(isinstance(norm, mcolors.Normalize))
    collection.set_cmap(cmap)
    collection.set_norm(norm)
    collection.set_clim(vmin, vmax)
    collection.autoscale_None()
    self.grid(False)

    x = X.compressed()
    y = Y.compressed()

    # Transform from native to data coordinates?
    t = collection._transform
    if (not isinstance(t, mtransforms.Transform)
            and hasattr(t, '_as_mpl_transform')):
        t = t._as_mpl_transform(self.axes)

    # (method name is spelled "seperately" in the transforms API)
    if t and any(t.contains_branch_seperately(self.transData)):
        trans_to_data = t - self.transData
        pts = np.vstack([x, y]).T.astype(np.float)
        transformed_pts = trans_to_data.transform(pts)
        x = transformed_pts[..., 0]
        y = transformed_pts[..., 1]

    minx = np.amin(x)
    maxx = np.amax(x)
    miny = np.amin(y)
    maxy = np.amax(y)

    corners = (minx, miny), (maxx, maxy)
    self.update_datalim(corners)
    self.autoscale_view()
    self.add_collection(collection, autolim=False)
    return collection
@docstring.dedent_interpd
def pcolormesh(self, *args, **kwargs):
    """
    Plot a quadrilateral mesh.

    Call signatures::

        pcolormesh(C)
        pcolormesh(X, Y, C)
        pcolormesh(C, **kwargs)

    Create a pseudocolor plot of a 2-D array.

    pcolormesh is similar to :func:`~matplotlib.pyplot.pcolor`,
    but uses a different mechanism and returns a different
    object; pcolor returns a
    :class:`~matplotlib.collections.PolyCollection` but pcolormesh
    returns a
    :class:`~matplotlib.collections.QuadMesh`.  It is much faster,
    so it is almost always preferred for large arrays.

    *C* may be a masked array, but *X* and *Y* may not.  Masked
    array support is implemented via *cmap* and *norm*; in
    contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
    draw quadrilaterals with masked colors or vertices.

    Keyword arguments:

      *cmap*: [ *None* | Colormap ]
        A :class:`matplotlib.colors.Colormap` instance. If *None*, use
        rc settings.

      *norm*: [ *None* | Normalize ]
        A :class:`matplotlib.colors.Normalize` instance is used to
        scale luminance data to 0,1. If *None*, defaults to
        :func:`normalize`.

      *vmin*/*vmax*: [ *None* | scalar ]
        *vmin* and *vmax* are used in conjunction with *norm* to
        normalize luminance data.  If either is *None*, it
        is autoscaled to the respective min or max
        of the color array *C*.  If not *None*, *vmin* or
        *vmax* passed in here override any pre-existing values
        supplied in the *norm* instance.

      *shading*: [ 'flat' | 'gouraud' ]
        'flat' indicates a solid color for each quad.  When
        'gouraud', each quad will be Gouraud shaded.  When gouraud
        shading, edgecolors is ignored.

      *edgecolors*: [*None* | ``'None'`` | ``'face'`` | color |
                     color sequence]

        If *None*, the rc setting is used by default.

        If ``'None'``, edges will not be visible.

        If ``'face'``, edges will have the same color as the faces.

        An mpl color or sequence of colors will set the edge color

      *alpha*: ``0 <= scalar <= 1`` or *None*
        the alpha blending value

    Return value is a :class:`matplotlib.collections.QuadMesh`
    object.

    kwargs can be used to control the
    :class:`matplotlib.collections.QuadMesh` properties:

    %(QuadMesh)s

    .. seealso::

        :func:`~matplotlib.pyplot.pcolor`
            For an explanation of the grid orientation and the
            expansion of 1-D *X* and/or *Y* to 2-D arrays.
    """
    # Respect the axes "hold" state before drawing.
    if not self._hold:
        self.cla()

    alpha = kwargs.pop('alpha', None)
    norm = kwargs.pop('norm', None)
    cmap = kwargs.pop('cmap', None)
    vmin = kwargs.pop('vmin', None)
    vmax = kwargs.pop('vmax', None)
    shading = kwargs.pop('shading', 'flat').lower()
    antialiased = kwargs.pop('antialiased', False)
    kwargs.setdefault('edgecolors', 'None')

    # Gouraud shading requires X, Y, C of identical dimensions;
    # flat shading needs one more boundary than color per direction.
    allmatch = (shading == 'gouraud')

    X, Y, C = self._pcolorargs('pcolormesh', *args, allmatch=allmatch)
    Ny, Nx = X.shape

    # convert to one dimensional arrays
    C = C.ravel()
    X = X.ravel()
    Y = Y.ravel()

    # unit conversion allows e.g. datetime objects as axis values
    self._process_unit_info(xdata=X, ydata=Y, kwargs=kwargs)
    X = self.convert_xunits(X)
    Y = self.convert_yunits(Y)

    # QuadMesh wants the grid corners as a flat (Nx * Ny, 2) array.
    coords = np.zeros(((Nx * Ny), 2), dtype=float)
    coords[:, 0] = X
    coords[:, 1] = Y

    collection = mcoll.QuadMesh(
        Nx - 1, Ny - 1, coords,
        antialiased=antialiased, shading=shading, **kwargs)
    collection.set_alpha(alpha)
    collection.set_array(C)
    if norm is not None:
        assert(isinstance(norm, mcolors.Normalize))
    collection.set_cmap(cmap)
    collection.set_norm(norm)
    collection.set_clim(vmin, vmax)
    collection.autoscale_None()

    self.grid(False)

    # Transform from native to data coordinates?
    t = collection._transform
    if (not isinstance(t, mtransforms.Transform)
            and hasattr(t, '_as_mpl_transform')):
        t = t._as_mpl_transform(self.axes)

    # (method name is spelled "seperately" in the transforms API)
    if t and any(t.contains_branch_seperately(self.transData)):
        trans_to_data = t - self.transData
        pts = np.vstack([X, Y]).T.astype(np.float)
        transformed_pts = trans_to_data.transform(pts)
        X = transformed_pts[..., 0]
        Y = transformed_pts[..., 1]

    minx = np.amin(X)
    maxx = np.amax(X)
    miny = np.amin(Y)
    maxy = np.amax(Y)

    corners = (minx, miny), (maxx, maxy)
    self.update_datalim(corners)
    self.autoscale_view()
    self.add_collection(collection, autolim=False)
    return collection
@docstring.dedent_interpd
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a pcolor-type method that
provides the fastest possible rendering with the Agg
backend, and that can handle any quadrilateral grid.
It supports only flat shading (no outlines), it lacks
support for log scaling of the axes, and it does not
have a pyplot wrapper.
Call signatures::
ax.pcolorfast(C, **kwargs)
ax.pcolorfast(xr, yr, C, **kwargs)
ax.pcolorfast(x, y, C, **kwargs)
ax.pcolorfast(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``ax.pcolorfast(C, **kwargs)`` is equivalent to
``ax.pcolorfast([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance from cm. If *None*,
use rc settings.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to scale
luminance data to 0,1. If *None*, defaults to normalize()
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max
of the color array *C* is used. If you pass a norm instance,
*vmin* and *vmax* will be *None*.
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a :class:`~matplotlib.collections.QuadMesh`
collection in the general quadrilateral case.
"""
if not self._hold:
self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01 * np.abs(dx.mean()) and
np.ptp(dy) < 0.01 * np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc + 1
Ny = nr + 1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0, edgecolors="None")
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection, autolim=False)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.add_image(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.add_image(im)
ret = im
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
def contour(self, *args, **kwargs):
    # Respect hold state: start from a clean axes unless holding.
    if not self._hold:
        self.cla()
    # Line contours only; the filled variant lives in contourf().
    kwargs['filled'] = False
    contour_set = mcontour.QuadContourSet(self, *args, **kwargs)
    return contour_set
contour.__doc__ = mcontour.QuadContourSet.contour_doc
def contourf(self, *args, **kwargs):
    # Respect hold state: start from a clean axes unless holding.
    if not self._hold:
        self.cla()
    # Filled contour regions; the line-only variant lives in contour().
    kwargs['filled'] = True
    contour_set = mcontour.QuadContourSet(self, *args, **kwargs)
    return contour_set
contourf.__doc__ = mcontour.QuadContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
    # Labelling is implemented by the ContourSet itself; this method
    # only delegates so that ax.clabel(CS, ...) works as a convenience.
    return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
@docstring.dedent_interpd
def table(self, **kwargs):
    """
    Add a table to the current axes.

    Call signature::

      table(cellText=None, cellColours=None,
            cellLoc='right', colWidths=None,
            rowLabels=None, rowColours=None, rowLoc='left',
            colLabels=None, colColours=None, colLoc='center',
            loc='bottom', bbox=None):

    Returns a :class:`matplotlib.table.Table` instance.  For finer
    grained control over tables, use the
    :class:`~matplotlib.table.Table` class and add it to the axes
    with :meth:`~matplotlib.axes.Axes.add_table`.

    Thanks to John Gill for providing the class and table.

    kwargs control the :class:`~matplotlib.table.Table`
    properties:

    %(Table)s
    """
    # All of the real work lives in matplotlib.table.table(); this
    # method simply binds it to this axes instance.
    tab = mtable.table(self, **kwargs)
    return tab
#### Data analysis
@docstring.dedent_interpd
def hist(self, x, bins=10, range=None, normed=False, weights=None,
         cumulative=False, bottom=None, histtype='bar', align='mid',
         orientation='vertical', rwidth=None, log=False,
         color=None, label=None, stacked=False,
         **kwargs):
    """
    Plot a histogram.

    Compute and draw the histogram of *x*. The return value is a
    tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
    [*patches0*, *patches1*,...]) if the input contains multiple
    data.

    Multiple data can be provided via *x* as a list of datasets
    of potentially different length ([*x0*, *x1*, ...]), or as
    a 2-D ndarray in which each column is a dataset.  Note that
    the ndarray form is transposed relative to the list form.

    Masked arrays are not supported at present.

    Parameters
    ----------
    x : (n,) array or sequence of (n,) arrays
        Input values, this takes either a single array or a sequency of
        arrays which are not required to be of the same length

    bins : integer or array_like, optional
        If an integer is given, `bins + 1` bin edges are returned,
        consistently with :func:`numpy.histogram` for numpy version >=
        1.3.

        Unequally spaced bins are supported if `bins` is a sequence.

        default is 10

    range : tuple or None, optional
        The lower and upper range of the bins. Lower and upper outliers
        are ignored. If not provided, `range` is (x.min(), x.max()). Range
        has no effect if `bins` is a sequence.

        If `bins` is a sequence or `range` is specified, autoscaling
        is based on the specified bin range instead of the
        range of x.

        Default is ``None``

    normed : boolean, optional
        If `True`, the first element of the return tuple will
        be the counts normalized to form a probability density, i.e.,
        ``n/(len(x)`dbin)``, i.e., the integral of the histogram will sum
        to 1. If *stacked* is also *True*, the sum of the histograms is
        normalized to 1.

        Default is ``False``

    weights : (n, ) array_like or None, optional
        An array of weights, of the same shape as `x`.  Each value in `x`
        only contributes its associated weight towards the bin count
        (instead of 1).  If `normed` is True, the weights are normalized,
        so that the integral of the density over the range remains 1.

        Default is ``None``

    cumulative : boolean, optional
        If `True`, then a histogram is computed where each bin gives the
        counts in that bin plus all bins for smaller values. The last bin
        gives the total number of datapoints.  If `normed` is also `True`
        then the histogram is normalized such that the last bin equals 1.
        If `cumulative` evaluates to less than 0 (e.g., -1), the direction
        of accumulation is reversed.  In this case, if `normed` is also
        `True`, then the histogram is normalized such that the first bin
        equals 1.

        Default is ``False``

    bottom : array_like, scalar, or None
        Location of the bottom baseline of each bin.  If a scalar,
        the base line for each bin is shifted by the same amount.
        If an array, each bin is shifted independently and the length
        of bottom must match the number of bins.  If None, defaults to 0.

        Default is ``None``

    histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, optional
        The type of histogram to draw.

        - 'bar' is a traditional bar-type histogram.  If multiple data
          are given the bars are aranged side by side.

        - 'barstacked' is a bar-type histogram where multiple
          data are stacked on top of each other.

        - 'step' generates a lineplot that is by default
          unfilled.

        - 'stepfilled' generates a lineplot that is by default
          filled.

        Default is 'bar'

    align : {'left', 'mid', 'right'}, optional
        Controls how the histogram is plotted.

        - 'left': bars are centered on the left bin edges.

        - 'mid': bars are centered between the bin edges.

        - 'right': bars are centered on the right bin edges.

        Default is 'mid'

    orientation : {'horizontal', 'vertical'}, optional
        If 'horizontal', `~matplotlib.pyplot.barh` will be used for
        bar-type histograms and the *bottom* kwarg will be the left edges.

    rwidth : scalar or None, optional
        The relative width of the bars as a fraction of the bin width.  If
        `None`, automatically compute the width.

        Ignored if `histtype` is 'step' or 'stepfilled'.

        Default is ``None``

    log : boolean, optional
        If `True`, the histogram axis will be set to a log scale. If `log`
        is `True` and `x` is a 1D array, empty bins will be filtered out
        and only the non-empty (`n`, `bins`, `patches`) will be returned.

        Default is ``False``

    color : color or array_like of colors or None, optional
        Color spec or sequence of color specs, one per dataset.  Default
        (`None`) uses the standard line color sequence.

        Default is ``None``

    label : string or None, optional
        String, or sequence of strings to match multiple datasets.  Bar
        charts yield multiple patches per dataset, but only the first gets
        the label, so that the legend command will work as expected.

        default is ``None``

    stacked : boolean, optional
        If `True`, multiple data are stacked on top of each other If
        `False` multiple data are aranged side by side if histtype is
        'bar' or on top of each other if histtype is 'step'

        Default is ``False``

    Returns
    -------
    n : array or list of arrays
        The values of the histogram bins. See **normed** and **weights**
        for a description of the possible semantics. If input **x** is an
        array, then this is an array of length **nbins**. If input is a
        sequence arrays ``[data1, data2,..]``, then this is a list of
        arrays with the values of the histograms for each of the arrays
        in the same order.

    bins : array
        The edges of the bins. Length nbins + 1 (nbins left edges and right
        edge of last bin).  Always a single array even when multiple data
        sets are passed in.

    patches : list or list of lists
        Silent list of individual patches used to create the histogram
        or list of such list if multiple input datasets.

    Other Parameters
    ----------------
    kwargs : `~matplotlib.patches.Patch` properties

    See also
    --------
    hist2d : 2D histograms

    Notes
    -----
    Until numpy release 1.5, the underlying numpy histogram function was
    incorrect with `normed`=`True` if bin sizes were unequal.  MPL
    inherited that error.  It is now corrected within MPL when using
    earlier numpy versions.

    Examples
    --------
    .. plot:: mpl_examples/statistics/histogram_demo_features.py

    """
    if not self._hold:
        self.cla()

    # xrange becomes range after 2to3
    bin_range = range
    range = __builtins__["range"]
    # NOTE: the range keyword overwrites the built-in func range !!!
    #       needs to be fixed in numpy                           !!!
    # NOTE(review): ``__builtins__`` is only guaranteed to be a dict in
    # imported modules (it is the module itself in __main__) -- works
    # here, but fragile if this code were ever pasted into a script.

    # Validate string inputs here so we don't have to clutter
    # subsequent code.
    if histtype not in ['bar', 'barstacked', 'step', 'stepfilled']:
        raise ValueError("histtype %s is not recognized" % histtype)

    if align not in ['left', 'mid', 'right']:
        raise ValueError("align kwarg %s is not recognized" % align)

    if orientation not in ['horizontal', 'vertical']:
        raise ValueError(
            "orientation kwarg %s is not recognized" % orientation)

    if histtype == 'barstacked' and not stacked:
        stacked = True

    # Check whether bins or range are given explicitly.
    binsgiven = (cbook.iterable(bins) or bin_range is not None)

    # basic input validation
    flat = np.ravel(x)
    if len(flat) == 0:
        raise ValueError("x must have at least one data point")
    elif len(flat) == 1 and not binsgiven:
        raise ValueError(
            "x has only one data point. bins or range kwarg must be given")

    # Massage 'x' for processing.
    # NOTE: Be sure any changes here is also done below to 'weights'
    if isinstance(x, np.ndarray) or not iterable(x[0]):
        # TODO: support masked arrays;
        x = np.asarray(x)
        if x.ndim == 2:
            x = x.T  # 2-D input with columns as datasets; switch to rows
        elif x.ndim == 1:
            x = x.reshape(1, x.shape[0])  # new view, single row
        else:
            raise ValueError("x must be 1D or 2D")
        if x.shape[1] < x.shape[0]:
            warnings.warn(
                '2D hist input should be nsamples x nvariables;\n '
                'this looks transposed (shape is %d x %d)' % x.shape[::-1])
    else:
        # multiple hist with data of different length
        x = [np.asarray(xi) for xi in x]

    nx = len(x)  # number of datasets

    if color is None:
        # One default cycle color per dataset.
        color = [six.next(self._get_lines.color_cycle)
                 for i in xrange(nx)]
    else:
        color = mcolors.colorConverter.to_rgba_array(color)
        if len(color) != nx:
            raise ValueError("color kwarg must have one color per dataset")

    # We need to do to 'weights' what was done to 'x'
    if weights is not None:
        if isinstance(weights, np.ndarray) or not iterable(weights[0]):
            w = np.array(weights)
            if w.ndim == 2:
                w = w.T
            elif w.ndim == 1:
                w.shape = (1, w.shape[0])
            else:
                raise ValueError("weights must be 1D or 2D")
        else:
            w = [np.asarray(wi) for wi in weights]

        if len(w) != nx:
            raise ValueError('weights should have the same shape as x')
        for i in xrange(nx):
            if len(w[i]) != len(x[i]):
                raise ValueError(
                    'weights should have the same shape as x')
    else:
        w = [None]*nx

    # Save the datalimits for the same reason:
    _saved_bounds = self.dataLim.bounds

    # If bins are not specified either explicitly or via range,
    # we need to figure out the range required for all datasets,
    # and supply that to np.histogram.
    if not binsgiven:
        xmin = np.inf
        xmax = -np.inf
        for xi in x:
            if len(xi) > 0:
                xmin = min(xmin, xi.min())
                xmax = max(xmax, xi.max())
        bin_range = (xmin, xmax)

    #hist_kwargs = dict(range=range, normed=bool(normed))
    # We will handle the normed kwarg within mpl until we
    # get to the point of requiring numpy >= 1.5.
    hist_kwargs = dict(range=bin_range)

    # Compute the raw (or per-dataset-normed / stacked) counts.  The
    # first np.histogram call replaces ``bins`` with the edge array, so
    # every subsequent dataset is binned identically.
    n = []
    mlast = None
    for i in xrange(nx):
        # this will automatically overwrite bins,
        # so that each histogram uses the same bins
        m, bins = np.histogram(x[i], bins, weights=w[i], **hist_kwargs)
        m = m.astype(float)  # causes problems later if it's an int
        if mlast is None:
            mlast = np.zeros(len(bins)-1, m.dtype)
        if normed and not stacked:
            db = np.diff(bins)
            m = (m.astype(float) / db) / m.sum()
        if stacked:
            if mlast is None:
                mlast = np.zeros(len(bins)-1, m.dtype)
            # Running cumulative sum: each entry of ``n`` holds the
            # total up to and including its dataset.
            m += mlast
            mlast[:] = m
        n.append(m)

    if stacked and normed:
        # Normalize the stack so the topmost histogram integrates to 1.
        db = np.diff(bins)
        for m in n:
            m[:] = (m.astype(float) / db) / n[-1].sum()

    if cumulative:
        slc = slice(None)
        # A negative ``cumulative`` reverses the direction of accumulation.
        if cbook.is_numlike(cumulative) and cumulative < 0:
            slc = slice(None, None, -1)

        if normed:
            n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
        else:
            n = [m[slc].cumsum()[slc] for m in n]

    patches = []

    if histtype.startswith('bar'):
        # Save autoscale state for later restoration; turn autoscaling
        # off so we can do it all a single time at the end, instead
        # of having it done by bar or fill and then having to be redone.
        _saved_autoscalex = self.get_autoscalex_on()
        _saved_autoscaley = self.get_autoscaley_on()
        self.set_autoscalex_on(False)
        self.set_autoscaley_on(False)

        totwidth = np.diff(bins)

        if rwidth is not None:
            dr = min(1.0, max(0.0, rwidth))
        elif len(n) > 1:
            dr = 0.8
        else:
            dr = 1.0

        if histtype == 'bar' and not stacked:
            # Side-by-side bars: each dataset gets 1/nx of the bin width
            # and an offset so the group is centered in the bin.
            width = dr*totwidth/nx
            dw = width

            if nx > 1:
                boffset = -0.5*dr*totwidth*(1.0-1.0/nx)
            else:
                boffset = 0.0
            stacked = False
        elif histtype == 'barstacked' or stacked:
            width = dr*totwidth
            boffset, dw = 0.0, 0.0

        if align == 'mid' or align == 'edge':
            boffset += 0.5*totwidth
        elif align == 'right':
            boffset += totwidth

        if orientation == 'horizontal':
            _barfunc = self.barh
            bottom_kwarg = 'left'
        else:  # orientation == 'vertical'
            _barfunc = self.bar
            bottom_kwarg = 'bottom'

        for m, c in zip(n, color):
            if bottom is None:
                bottom = np.zeros(len(m), np.float)
            if stacked:
                height = m - bottom
            else:
                height = m
            patch = _barfunc(bins[:-1]+boffset, height, width,
                             align='center', log=log,
                             color=c, **{bottom_kwarg: bottom})
            patches.append(patch)
            if stacked:
                # Top of this dataset becomes the base of the next one.
                bottom[:] = m
            boffset += dw

        self.set_autoscalex_on(_saved_autoscalex)
        self.set_autoscaley_on(_saved_autoscaley)
        self.autoscale_view()

    elif histtype.startswith('step'):
        # these define the perimeter of the polygon
        x = np.zeros(4 * len(bins) - 3, np.float)
        y = np.zeros(4 * len(bins) - 3, np.float)

        x[0:2*len(bins)-1:2], x[1:2*len(bins)-1:2] = bins, bins[:-1]
        x[2*len(bins)-1:] = x[1:2*len(bins)-1][::-1]

        if bottom is None:
            bottom = np.zeros(len(bins)-1, np.float)

        y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = bottom, bottom
        y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]

        if log:
            if orientation == 'horizontal':
                self.set_xscale('log', nonposx='clip')
                logbase = self.xaxis._scale.base
            else:  # orientation == 'vertical'
                self.set_yscale('log', nonposy='clip')
                logbase = self.yaxis._scale.base

            # Setting a minimum of 0 results in problems for log plots
            if normed or weights is not None:
                # For normed data, set to log base * minimum data value
                # (gives 1 full tick-label unit for the lowest filled bin)
                ndata = np.array(n)
                minimum = (np.min(ndata[ndata > 0])) / logbase
            else:
                # For non-normed data, set the min to log base,
                # again so that there is 1 full tick-label unit
                # for the lowest bin
                minimum = 1.0 / logbase

            y[0], y[-1] = minimum, minimum
        else:
            minimum = np.min(bins)

        if align == 'left' or align == 'center':
            x -= 0.5*(bins[1]-bins[0])
        elif align == 'right':
            x += 0.5*(bins[1]-bins[0])

        # If fill kwarg is set, it will be passed to the patch collection,
        # overriding this
        fill = (histtype == 'stepfilled')

        # Build one (x, y) polygon per dataset.  Note that ``x`` and
        # ``y`` are reused in place across iterations, hence the copies.
        xvals, yvals = [], []
        for m in n:
            if stacked:
                # starting point for drawing polygon
                y[0] = y[1]
                # top of the previous polygon becomes the bottom
                y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
            # set the top of this polygon
            y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = (m + bottom,
                                                        m + bottom)
            if log:
                y[y < minimum] = minimum
            if orientation == 'horizontal':
                xvals.append(y.copy())
                yvals.append(x.copy())
            else:
                xvals.append(x.copy())
                yvals.append(y.copy())

        if fill:
            # add patches in reverse order so that when stacking,
            # items lower in the stack are plottted on top of
            # items higher in the stack
            for x, y, c in reversed(list(zip(xvals, yvals, color))):
                patches.append(self.fill(
                    x, y,
                    closed=True,
                    facecolor=c))
        else:
            for x, y, c in reversed(list(zip(xvals, yvals, color))):
                split = 2 * len(bins)
                patches.append(self.fill(
                    x[:split], y[:split],
                    closed=False, edgecolor=c,
                    fill=False))

        # we return patches, so put it back in the expected order
        patches.reverse()

        # adopted from adjust_x/ylim part of the bar method
        if orientation == 'horizontal':
            xmin0 = max(_saved_bounds[0]*0.9, minimum)
            xmax = self.dataLim.intervalx[1]
            for m in n:
                if np.sum(m) > 0:  # make sure there are counts
                    xmin = np.amin(m[m != 0])
                    # filter out the 0 height bins
            xmin = max(xmin*0.9, minimum)
            xmin = min(xmin0, xmin)
            self.dataLim.intervalx = (xmin, xmax)
        elif orientation == 'vertical':
            ymin0 = max(_saved_bounds[1]*0.9, minimum)
            ymax = self.dataLim.intervaly[1]
            for m in n:
                if np.sum(m) > 0:  # make sure there are counts
                    ymin = np.amin(m[m != 0])
                    # filter out the 0 height bins
            ymin = max(ymin*0.9, minimum)
            ymin = min(ymin0, ymin)
            self.dataLim.intervaly = (ymin, ymax)

    if label is None:
        labels = [None]
    elif is_string_like(label):
        labels = [label]
    else:
        labels = [str(lab) for lab in label]

    # Only the first patch of each dataset carries the legend label; the
    # rest are hidden from the legend with '_nolegend_'.
    for (patch, lbl) in zip_longest(patches, labels, fillvalue=None):
        if patch:
            p = patch[0]
            p.update(kwargs)
            if lbl is not None:
                p.set_label(lbl)

            p.set_snap(False)
            for p in patch[1:]:
                p.update(kwargs)
                p.set_label('_nolegend_')

    if binsgiven:
        # Autoscale on the explicitly requested bin span, not the data.
        if orientation == 'vertical':
            self.update_datalim(
                [(bins[0], 0), (bins[-1], 0)], updatey=False)
        else:
            self.update_datalim(
                [(0, bins[0]), (0, bins[-1])], updatex=False)

    if nx == 1:
        return n[0], bins, cbook.silent_list('Patch', patches[0])
    else:
        return n, bins, cbook.silent_list('Lists of Patches', patches)
@docstring.dedent_interpd
def hist2d(self, x, y, bins=10, range=None, normed=False, weights=None,
           cmin=None, cmax=None, **kwargs):
    """
    Make a 2D histogram plot.

    Parameters
    ----------
    x, y: array_like, shape (n, )
        Input values

    bins: [None | int | [int, int] | array_like | [array, array]]

        The bin specification:

            - If int, the number of bins for the two dimensions
              (nx=ny=bins).

            - If [int, int], the number of bins in each dimension
              (nx, ny = bins).

            - If array_like, the bin edges for the two dimensions
              (x_edges=y_edges=bins).

            - If [array, array], the bin edges in each dimension
              (x_edges, y_edges = bins).

        The default value is 10.

    range : array_like shape(2, 2), optional, default: None
        The leftmost and rightmost edges of the bins along each dimension
        (if not specified explicitly in the bins parameters): [[xmin,
        xmax], [ymin, ymax]]. All values outside of this range will be
        considered outliers and not tallied in the histogram.

    normed : boolean, optional, default: False
        Normalize histogram.

    weights : array_like, shape (n, ), optional, default: None
        An array of values w_i weighing each sample (x_i, y_i).

    cmin : scalar, optional, default: None
        All bins that has count less than cmin will not be displayed and
        these count values in the return value count histogram will also
        be set to nan upon return

    cmax : scalar, optional, default: None
        All bins that has count more than cmax will not be displayed (set
        to none before passing to imshow) and these count values in the
        return value count histogram will also be set to nan upon return

    Returns
    -------
    The return value is ``(counts, xedges, yedges, Image)``.

    Other parameters
    -----------------
    kwargs : :meth:`pcolorfast` properties.

    See also
    --------
    hist : 1D histogram

    Notes
    -----
    Rendering the histogram with a logarithmic color scale is
    accomplished by passing a :class:`colors.LogNorm` instance to
    the *norm* keyword argument. Likewise, power-law normalization
    (similar in effect to gamma correction) can be accomplished with
    :class:`colors.PowerNorm`.

    Examples
    --------
    .. plot:: mpl_examples/pylab_examples/hist2d_demo.py
    """

    # xrange becomes range after 2to3
    bin_range = range
    range = __builtins__["range"]
    # NOTE(review): same fragile ``__builtins__`` dict lookup as in
    # hist() -- only valid inside an imported module, not in __main__.

    # All of the binning is delegated to numpy.
    h, xedges, yedges = np.histogram2d(x, y, bins=bins, range=bin_range,
                                       normed=normed, weights=weights)

    # Assigning None into a float array stores NaN, so out-of-bounds
    # cells are both hidden by the renderer and reported as nan in the
    # returned counts.
    if cmin is not None:
        h[h < cmin] = None
    if cmax is not None:
        h[h > cmax] = None

    # h is indexed (x, y); pcolorfast wants rows = y, hence the transpose.
    pc = self.pcolorfast(xedges, yedges, h.T, **kwargs)
    self.set_xlim(xedges[0], xedges[-1])
    self.set_ylim(yedges[0], yedges[-1])

    return h, xedges, yedges, pc
@docstring.dedent_interpd
def psd(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
        window=None, noverlap=None, pad_to=None,
        sides=None, scale_by_freq=None, return_line=None, **kwargs):
    """
    Plot the power spectral density.

    Call signature::

      psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
          window=mlab.window_hanning, noverlap=0, pad_to=None,
          sides='default', scale_by_freq=None, return_line=None, **kwargs)

    The power spectral density :math:`P_{xx}` by Welch's average
    periodogram method.  The vector *x* is divided into *NFFT* length
    segments.  Each segment is detrended by function *detrend* and
    windowed by function *window*.  *noverlap* gives the length of
    the overlap between segments.  The :math:`|\mathrm{fft}(i)|^2`
    of each segment :math:`i` are averaged to compute :math:`P_{xx}`,
    with a scaling to correct for power loss due to windowing.

    If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.

    *x*: 1-D array or sequence
        Array or sequence containing the data

    %(Spectral)s

    %(PSD)s

    *noverlap*: integer
        The number of points of overlap between segments.
        The default value is 0 (no overlap).

    *Fc*: integer
        The center frequency of *x* (defaults to 0), which offsets
        the x extents of the plot to reflect the frequency range used
        when a signal is acquired and then filtered and downsampled to
        baseband.

    *return_line*: bool
        Whether to include the line object plotted in the returned values.
        Default is False.

    If *return_line* is False, returns the tuple (*Pxx*, *freqs*).
    If *return_line* is True, returns the tuple (*Pxx*, *freqs*. *line*):

      *Pxx*: 1-D array
        The values for the power spectrum `P_{xx}` before scaling
        (real valued)

      *freqs*: 1-D array
        The frequencies corresponding to the elements in *Pxx*

      *line*: a :class:`~matplotlib.lines.Line2D` instance
        The line created by this function.
        Only returend if *return_line* is True.

    For plotting, the power is plotted as
    :math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
    is returned.

    References:
      Bendat & Piersol -- Random Data: Analysis and Measurement
      Procedures, John Wiley & Sons (1986)

    kwargs control the :class:`~matplotlib.lines.Line2D` properties:

    %(Line2D)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/psd_demo.py

    .. seealso::

        :func:`specgram`
            :func:`specgram` differs in the default overlap; in not
            returning the mean of the segment periodograms; in returning
            the times of the segments; and in plotting a colormap instead
            of a line.

        :func:`magnitude_spectrum`
            :func:`magnitude_spectrum` plots the magnitude spectrum.

        :func:`csd`
            :func:`csd` plots the spectral density between two signals.
    """
    if not self._hold:
        self.cla()
    # Center frequency defaults to 0 (baseband).
    if Fc is None:
        Fc = 0

    # All spectral computation is delegated to mlab.psd.
    pxx, freqs = mlab.psd(x=x, NFFT=NFFT, Fs=Fs, detrend=detrend,
                          window=window, noverlap=noverlap, pad_to=pad_to,
                          sides=sides, scale_by_freq=scale_by_freq)
    pxx.shape = len(freqs),  # force a flat 1-D array
    freqs += Fc  # shift the frequency axis by the center frequency

    # Label units according to mlab.psd's scaling (density by default).
    if scale_by_freq in (None, True):
        psd_units = 'dB/Hz'
    else:
        psd_units = 'dB'

    line = self.plot(freqs, 10 * np.log10(pxx), **kwargs)
    self.set_xlabel('Frequency')
    self.set_ylabel('Power Spectral Density (%s)' % psd_units)
    self.grid(True)

    # Choose y-ticks at 10 dB per visible decade; a span of less than
    # one decade (logi == 0) falls back to a 1 dB step.
    # NOTE(review): a span below 1 dB makes logi negative and hence the
    # step negative, yielding an empty tick array -- confirm intended.
    vmin, vmax = self.viewLim.intervaly
    intv = vmax - vmin
    logi = int(np.log10(intv))
    if logi == 0:
        logi = .1
    step = 10 * logi
    #print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
    ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
    self.set_yticks(ticks)

    if return_line is None or not return_line:
        return pxx, freqs
    else:
        return pxx, freqs, line
@docstring.dedent_interpd
def csd(self, x, y, NFFT=None, Fs=None, Fc=None, detrend=None,
        window=None, noverlap=None, pad_to=None,
        sides=None, scale_by_freq=None, return_line=None, **kwargs):
    """
    Plot the cross-spectral density.

    Call signature::

      csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
          window=mlab.window_hanning, noverlap=0, pad_to=None,
          sides='default', scale_by_freq=None, return_line=None, **kwargs)

    The cross spectral density :math:`P_{xy}` by Welch's average
    periodogram method.  The vectors *x* and *y* are divided into
    *NFFT* length segments.  Each segment is detrended by function
    *detrend* and windowed by function *window*.  *noverlap* gives
    the length of the overlap between segments.  The product of
    the direct FFTs of *x* and *y* are averaged over each segment
    to compute :math:`P_{xy}`, with a scaling to correct for power
    loss due to windowing.

    If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
    padded to *NFFT*.

    *x*, *y*: 1-D arrays or sequences
        Arrays or sequences containing the data

    %(Spectral)s

    %(PSD)s

    *noverlap*: integer
        The number of points of overlap between segments.
        The default value is 0 (no overlap).

    *Fc*: integer
        The center frequency of *x* (defaults to 0), which offsets
        the x extents of the plot to reflect the frequency range used
        when a signal is acquired and then filtered and downsampled to
        baseband.

    *return_line*: bool
        Whether to include the line object plotted in the returned values.
        Default is False.

    If *return_line* is False, returns the tuple (*Pxy*, *freqs*).
    If *return_line* is True, returns the tuple (*Pxy*, *freqs*, *line*):

      *Pxy*: 1-D array
        The values for the cross spectrum `P_{xy}` before scaling
        (complex valued)

      *freqs*: 1-D array
        The frequencies corresponding to the elements in *Pxy*

      *line*: a :class:`~matplotlib.lines.Line2D` instance
        The line created by this function.
        Only returned if *return_line* is True.

    For plotting, the power is plotted as
    :math:`10\log_{10}(P_{xy})` for decibels, though `P_{xy}` itself
    is returned.

    References:
      Bendat & Piersol -- Random Data: Analysis and Measurement
      Procedures, John Wiley & Sons (1986)

    kwargs control the Line2D properties:

    %(Line2D)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/csd_demo.py

    .. seealso::

        :func:`psd`
            :func:`psd` is the equivalent to setting y=x.
    """
    if not self._hold:
        self.cla()
    # Center frequency defaults to 0 (baseband).
    if Fc is None:
        Fc = 0

    # All spectral computation is delegated to mlab.csd.
    pxy, freqs = mlab.csd(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
                          window=window, noverlap=noverlap, pad_to=pad_to,
                          sides=sides, scale_by_freq=scale_by_freq)
    pxy.shape = len(freqs),  # force a flat 1-D array
    # pxy is complex

    freqs += Fc  # shift the frequency axis by the center frequency

    line = self.plot(freqs, 10 * np.log10(np.absolute(pxy)), **kwargs)
    self.set_xlabel('Frequency')
    self.set_ylabel('Cross Spectrum Magnitude (dB)')
    self.grid(True)
    vmin, vmax = self.viewLim.intervaly

    intv = vmax - vmin
    # Choose y-ticks at 10 dB per visible decade.  A visible span of
    # less than one decade makes int(log10(intv)) == 0, which previously
    # gave step == 0 and made np.arange raise ZeroDivisionError; fall
    # back to a 1 dB step in that case, mirroring :meth:`psd`.
    logi = int(np.log10(intv))
    if logi == 0:
        logi = .1
    step = 10 * logi
    ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
    self.set_yticks(ticks)

    if return_line is None or not return_line:
        return pxy, freqs
    else:
        return pxy, freqs, line
@docstring.dedent_interpd
def magnitude_spectrum(self, x, Fs=None, Fc=None, window=None,
                       pad_to=None, sides=None, scale=None,
                       **kwargs):
    """
    Plot the magnitude spectrum.

    Call signature::

      magnitude_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
                         pad_to=None, sides='default', **kwargs)

    Compute the magnitude spectrum of *x*.  Data is padded to a
    length of *pad_to* and the windowing function *window* is applied to
    the signal.

    *x*: 1-D array or sequence
        Array or sequence containing the data

    %(Spectral)s

    %(Single_Spectrum)s

    *scale*: [ 'default' | 'linear' | 'dB' ]
        The scaling of the values in the *spec*.  'linear' is no scaling.
        'dB' returns the values in dB scale.  When *mode* is 'density',
        this is dB power (10 * log10).  Otherwise this is dB amplitude
        (20 * log10). 'default' is 'linear'.

    *Fc*: integer
        The center frequency of *x* (defaults to 0), which offsets
        the x extents of the plot to reflect the frequency range used
        when a signal is acquired and then filtered and downsampled to
        baseband.

    Returns the tuple (*spectrum*, *freqs*, *line*):

      *spectrum*: 1-D array
        The values for the magnitude spectrum before scaling (real valued)

      *freqs*: 1-D array
        The frequencies corresponding to the elements in *spectrum*

      *line*: a :class:`~matplotlib.lines.Line2D` instance
        The line created by this function

    kwargs control the :class:`~matplotlib.lines.Line2D` properties:

    %(Line2D)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/spectrum_demo.py

    .. seealso::

        :func:`psd`
            :func:`psd` plots the power spectral density.`.

        :func:`angle_spectrum`
            :func:`angle_spectrum` plots the angles of the corresponding
            frequencies.

        :func:`phase_spectrum`
            :func:`phase_spectrum` plots the phase (unwrapped angle) of the
            corresponding frequencies.

        :func:`specgram`
            :func:`specgram` can plot the magnitude spectrum of segments
            within the signal in a colormap.
    """
    if not self._hold:
        self.cla()
    # Center frequency defaults to 0 (baseband).
    if Fc is None:
        Fc = 0

    if scale is None or scale == 'default':
        scale = 'linear'

    # Spectral computation is delegated to mlab; scaling is applied here.
    spec, freqs = mlab.magnitude_spectrum(x=x, Fs=Fs, window=window,
                                          pad_to=pad_to, sides=sides)
    freqs += Fc  # shift the frequency axis by the center frequency

    if scale == 'linear':
        Z = spec
        yunits = 'energy'
    elif scale == 'dB':
        Z = 20. * np.log10(spec)
        yunits = 'dB'
    else:
        # BUGFIX: the message was previously built with
        # ValueError('Unknown scale %s', scale) -- two arguments instead
        # of %-formatting -- so the error text rendered as a tuple.
        raise ValueError("Unknown scale '%s'" % scale)

    lines = self.plot(freqs, Z, **kwargs)
    self.set_xlabel('Frequency')
    self.set_ylabel('Magnitude (%s)' % yunits)

    # Return the unscaled spectrum; Z is for plotting only.
    return spec, freqs, lines[0]
@docstring.dedent_interpd
def angle_spectrum(self, x, Fs=None, Fc=None, window=None,
                   pad_to=None, sides=None, **kwargs):
    """
    Plot the angle spectrum.

    Call signature::

      angle_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
                     pad_to=None, sides='default', **kwargs)

    Compute the angle spectrum (wrapped phase spectrum) of *x*.
    Data is padded to a length of *pad_to* and the windowing function
    *window* is applied to the signal.

    *x*: 1-D array or sequence
        Array or sequence containing the data

    %(Spectral)s

    %(Single_Spectrum)s

    *Fc*: integer
        The center frequency of *x* (defaults to 0), which offsets
        the x extents of the plot to reflect the frequency range used
        when a signal is acquired and then filtered and downsampled to
        baseband.

    Returns the tuple (*spectrum*, *freqs*, *line*):

      *spectrum*: 1-D array
        The values for the angle spectrum in radians (real valued)

      *freqs*: 1-D array
        The frequencies corresponding to the elements in *spectrum*

      *line*: a :class:`~matplotlib.lines.Line2D` instance
        The line created by this function

    kwargs control the :class:`~matplotlib.lines.Line2D` properties:

    %(Line2D)s

    **Example:**

    .. plot:: mpl_examples/pylab_examples/spectrum_demo.py

    .. seealso::

        :func:`magnitude_spectrum`
            :func:`angle_spectrum` plots the magnitudes of the
            corresponding frequencies.

        :func:`phase_spectrum`
            :func:`phase_spectrum` plots the unwrapped version of this
            function.

        :func:`specgram`
            :func:`specgram` can plot the angle spectrum of segments
            within the signal in a colormap.
    """
    # Honour the hold state before drawing anything.
    if not self._hold:
        self.cla()

    # A missing center frequency means baseband (no offset).
    center_freq = 0 if Fc is None else Fc

    # The actual spectral computation lives in mlab.
    spectrum, frequencies = mlab.angle_spectrum(x=x, Fs=Fs, window=window,
                                                pad_to=pad_to, sides=sides)
    frequencies += center_freq

    plotted = self.plot(frequencies, spectrum, **kwargs)
    self.set_xlabel('Frequency')
    self.set_ylabel('Angle (radians)')

    return spectrum, frequencies, plotted[0]
@docstring.dedent_interpd
def phase_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, **kwargs):
"""
Plot the phase spectrum.
Call signature::
phase_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
pad_to=None, sides='default', **kwargs)
Compute the phase spectrum (unwrapped angle spectrum) of *x*.
Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*spectrum*, *freqs*, *line*):
*spectrum*: 1-D array
The values for the phase spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/spectrum_demo.py
.. seealso::
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` plots the magnitudes of the
corresponding frequencies.
:func:`angle_spectrum`
:func:`angle_spectrum` plots the wrapped version of this
function.
:func:`specgram`
:func:`specgram` can plot the phase spectrum of segments
within the signal in a colormap.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
spec, freqs = mlab.phase_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
lines = self.plot(freqs, spec, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Phase (radians)')
return spec, freqs, lines[0]
@docstring.dedent_interpd
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
Plot the coherence between *x* and *y*.
Call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the
normalized cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The
default value is 0 (no overlap).
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold:
self.cla()
cxy, freqs = mlab.cohere(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap,
scale_by_freq=scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
@docstring.dedent_interpd
def specgram(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None,
cmap=None, xextent=None, pad_to=None, sides=None,
scale_by_freq=None, mode=None, scale=None,
vmin=None, vmax=None, **kwargs):
"""
Plot a spectrogram.
Call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, mode='default', scale='default',
**kwargs)
Compute and plot a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the spectrum of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*. The spectrogram is plotted as a colormap
(using imshow).
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*mode*: [ 'default' | 'psd' | 'magnitude' | 'angle' | 'phase' ]
What sort of spectrum to use. Default is 'psd'. which takes
the power spectral density. 'complex' returns the complex-valued
frequency spectrum. 'magnitude' returns the magnitude spectrum.
'angle' returns the phase spectrum without unwrapping. 'phase'
returns the phase spectrum with unwrapping.
*noverlap*: integer
The number of points of overlap between blocks. The
default value is 128.
*scale*: [ 'default' | 'linear' | 'dB' ]
The scaling of the values in the *spec*. 'linear' is no scaling.
'dB' returns the values in dB scale. When *mode* is 'psd',
this is dB power (10 * log10). Otherwise this is dB amplitude
(20 * log10). 'default' is 'dB' if *mode* is 'psd' or
'magnitude' and 'linear' otherwise. This must be 'linear'
if *mode* is 'angle' or 'phase'.
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.colors.Colormap` instance; if *None*, use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`~matplotlib.mlab.specgram`
*kwargs*:
Additional kwargs are passed on to imshow which makes the
specgram image
.. note::
*detrend* and *scale_by_freq* only apply when *mode* is set to
'psd'
Returns the tuple (*spectrum*, *freqs*, *t*, *im*):
*spectrum*: 2-D array
columns are the periodograms of successive segments
*freqs*: 1-D array
The frequencies corresponding to the rows in *spectrum*
*t*: 1-D array
The times corresponding to midpoints of segments (i.e the columns
in *spectrum*)
*im*: instance of class :class:`~matplotlib.image.AxesImage`
The image created by imshow containing the spectrogram
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
.. seealso::
:func:`psd`
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; in not returning
times; and in generating a line plot instead of colormap.
:func:`magnitude_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'magnitude'. Plots a line instead of a colormap.
:func:`angle_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'angle'. Plots a line instead of a colormap.
:func:`phase_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'phase'. Plots a line instead of a colormap.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
if mode == 'complex':
raise ValueError('Cannot plot a complex specgram')
if scale is None or scale == 'default':
if mode in ['angle', 'phase']:
scale = 'linear'
else:
scale = 'dB'
elif mode in ['angle', 'phase'] and scale == 'dB':
raise ValueError('Cannot use dB scale with angle or phase mode')
spec, freqs, t = mlab.specgram(x=x, NFFT=NFFT, Fs=Fs,
detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if scale == 'linear':
Z = spec
elif scale == 'dB':
if mode is None or mode == 'default' or mode == 'psd':
Z = 10. * np.log10(spec)
else:
Z = 20. * np.log10(spec)
else:
raise ValueError('Unknown scale %s', scale)
Z = np.flipud(Z)
if xextent is None:
xextent = 0, np.amax(t)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent, vmin=vmin, vmax=vmax,
**kwargs)
self.axis('auto')
return spec, freqs, t, im
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', origin="upper", **kwargs):
"""
Plot the sparsity pattern on a 2-D array.
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
Parameters
----------
Z : sparse array (n, m)
The array to be plotted.
precision : float, optional, default: 0
If *precision* is 0, any non-zero value will be plotted; else,
values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a special
case: if *precision* is 'present', any value present in the array
will be plotted, even if it is identically zero.
origin : ["upper", "lower"], optional, default: "upper"
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
aspect : ['auto' | 'equal' | scalar], optional, default: "equal"
If 'equal', and `extent` is None, changes the axes aspect ratio to
match that of the image. If `extent` is not `None`, the axes
aspect ratio is changed to match that of the extent.
If 'auto', changes the image aspect ratio to match that of the
axes.
If None, default to rc ``image.aspect`` value.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
See also
--------
imshow : for image options.
plot : for plotting options
"""
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z) > precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc - 0.5, nr - 0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin=origin, **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z) > precision
y, x = np.nonzero(nonzero)
if marker is None:
marker = 's'
if markersize is None:
markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc - 0.5)
self.set_ylim(ymin=nr - 0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
def matshow(self, Z, **kwargs):
"""
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed, with the first
row at the top. Row and column numbering is zero-based.
Parameters
----------
Z : array_like shape (n, m)
The matrix to be displayed.
Returns
-------
image : `~matplotlib.image.AxesImage`
Other parameters
----------------
kwargs : `~matplotlib.axes.Axes.imshow` arguments
Sets `origin` to 'upper', 'interpolation' to 'nearest' and
'aspect' to equal.
See also
--------
imshow : plot an image
Examples
--------
.. plot:: mpl_examples/pylab_examples/matshow.py
"""
Z = np.asanyarray(Z)
nr, nc = Z.shape
kw = {'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
def violinplot(self, dataset, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False,
points=100, bw_method=None):
"""Make a violin plot.
Call signature::
violinplot(dataset, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False,
points=100, bw_method=None):
Make a violin plot for each column of *dataset* or each vector in
sequence *dataset*. Each filled area extends to represent the
entire data range, with optional lines at the mean, the median,
the minimum, and the maximum.
Parameters
----------
dataset : Array or a sequence of vectors.
The input data.
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the violins. The ticks and limits are
automatically set to match the positions.
vert : bool, default = True.
If true, creates a vertical violin plot.
Otherwise, creates a horizontal violin plot.
widths : array-like, default = 0.5
Either a scalar or a vector that sets the maximal width of
each violin. The default is 0.5, which uses about half of the
available horizontal space.
showmeans : bool, default = False
If `True`, will toggle rendering of the means.
showextrema : bool, default = True
If `True`, will toggle rendering of the extrema.
showmedians : bool, default = False
If `True`, will toggle rendering of the medians.
points : scalar, default = 100
Defines the number of points to evaluate each of the
gaussian kernel density estimations at.
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a
callable, it should take a `GaussianKDE` instance as its only
parameter and return a scalar. If None (default), 'scott' is used.
Returns
-------
result : dict
A dictionary mapping each component of the violinplot to a
list of the corresponding collection instances created. The
dictionary has the following keys:
- ``bodies``: A list of the
:class:`matplotlib.collections.PolyCollection` instances
containing the filled area of each violin.
- ``means``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the mean values of each of the
violin's distribution.
- ``mins``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the bottom of each violin's
distribution.
- ``maxes``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the top of each violin's
distribution.
- ``bars``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the centers of each violin's
distribution.
- ``medians``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the median values of each of the
violin's distribution.
"""
def _kde_method(X, coords):
kde = mlab.GaussianKDE(X, bw_method)
return kde.evaluate(coords)
vpstats = cbook.violin_stats(dataset, _kde_method, points=points)
return self.violin(vpstats, positions=positions, vert=vert,
widths=widths, showmeans=showmeans,
showextrema=showextrema, showmedians=showmedians)
def violin(self, vpstats, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False):
"""Drawing function for violin plots.
Call signature::
violin(vpstats, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False):
Draw a violin plot for each column of `vpstats`. Each filled area
extends to represent the entire data range, with optional lines at the
mean, the median, the minimum, and the maximum.
Parameters
----------
vpstats : list of dicts
A list of dictionaries containing stats for each violin plot.
Required keys are:
- ``coords``: A list of scalars containing the coordinates that
the violin's kernel density estimate were evaluated at.
- ``vals``: A list of scalars containing the values of the
kernel density estimate at each of the coordinates given
in *coords*.
- ``mean``: The mean value for this violin's dataset.
- ``median``: The median value for this violin's dataset.
- ``min``: The minimum value for this violin's dataset.
- ``max``: The maximum value for this violin's dataset.
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the violins. The ticks and limits are
automatically set to match the positions.
vert : bool, default = True.
If true, plots the violins veritcally.
Otherwise, plots the violins horizontally.
widths : array-like, default = 0.5
Either a scalar or a vector that sets the maximal width of
each violin. The default is 0.5, which uses about half of the
available horizontal space.
showmeans : bool, default = False
If true, will toggle rendering of the means.
showextrema : bool, default = True
If true, will toggle rendering of the extrema.
showmedians : bool, default = False
If true, will toggle rendering of the medians.
Returns
-------
result : dict
A dictionary mapping each component of the violinplot to a
list of the corresponding collection instances created. The
dictionary has the following keys:
- ``bodies``: A list of the
:class:`matplotlib.collections.PolyCollection` instances
containing the filled area of each violin.
- ``means``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the mean values of each of the
violin's distribution.
- ``mins``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the bottom of each violin's
distribution.
- ``maxes``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the top of each violin's
distribution.
- ``bars``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the centers of each violin's
distribution.
- ``medians``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the median values of each of the
violin's distribution.
"""
# Statistical quantities to be plotted on the violins
means = []
mins = []
maxes = []
medians = []
# Collections to be returned
artists = {}
N = len(vpstats)
datashape_message = ("List of violinplot statistics and `{0}` "
"values must have the same length")
# Validate positions
if positions is None:
positions = range(1, N + 1)
elif len(positions) != N:
raise ValueError(datashape_message.format("positions"))
# Validate widths
if np.isscalar(widths):
widths = [widths] * N
elif len(widths) != N:
raise ValueError(datashape_message.format("widths"))
# Calculate ranges for statistics lines
pmins = -0.25 * np.array(widths) + positions
pmaxes = 0.25 * np.array(widths) + positions
# Check whether we are rendering vertically or horizontally
if vert:
fill = self.fill_betweenx
perp_lines = self.hlines
par_lines = self.vlines
else:
fill = self.fill_between
perp_lines = self.vlines
par_lines = self.hlines
# Render violins
bodies = []
for stats, pos, width in zip(vpstats, positions, widths):
# The 0.5 factor reflects the fact that we plot from v-p to
# v+p
vals = np.array(stats['vals'])
vals = 0.5 * width * vals / vals.max()
bodies += [fill(stats['coords'],
-vals + pos,
vals + pos,
facecolor='y',
alpha=0.3)]
means.append(stats['mean'])
mins.append(stats['min'])
maxes.append(stats['max'])
medians.append(stats['median'])
artists['bodies'] = bodies
# Render means
if showmeans:
artists['cmeans'] = perp_lines(means, pmins, pmaxes, colors='r')
# Render extrema
if showextrema:
artists['cmaxes'] = perp_lines(maxes, pmins, pmaxes, colors='r')
artists['cmins'] = perp_lines(mins, pmins, pmaxes, colors='r')
artists['cbars'] = par_lines(positions, mins, maxes, colors='r')
# Render medians
if showmedians:
artists['cmedians'] = perp_lines(medians,
pmins,
pmaxes,
colors='r')
return artists
def tricontour(self, *args, **kwargs):
return mtri.tricontour(self, *args, **kwargs)
tricontour.__doc__ = mtri.TriContourSet.tricontour_doc
def tricontourf(self, *args, **kwargs):
return mtri.tricontourf(self, *args, **kwargs)
tricontourf.__doc__ = mtri.TriContourSet.tricontour_doc
def tripcolor(self, *args, **kwargs):
return mtri.tripcolor(self, *args, **kwargs)
tripcolor.__doc__ = mtri.tripcolor.__doc__
def triplot(self, *args, **kwargs):
return mtri.triplot(self, *args, **kwargs)
triplot.__doc__ = mtri.triplot.__doc__
| mit |
rainest/dance-partner-matching | networkx/readwrite/tests/test_gml.py | 1 | 2734 | #!/usr/bin/env python
import io
from nose.tools import *
from nose import SkipTest
import networkx
class TestGraph(object):
@classmethod
def setupClass(cls):
global pyparsing
try:
import pyparsing
except ImportError:
try:
import matplotlib.pyparsing as pyparsing
except:
raise SkipTest('gml test: pyparsing not available.')
def setUp(self):
self.simple_data="""Creator me
graph [
comment "This is a sample graph"
directed 1
IsPlanar 1
pos [ x 0 y 1 ]
node [
id 1
label "Node 1"
pos [ x 1 y 1 ]
]
node [
id 2
pos [ x 1 y 2 ]
label "Node 2"
]
node [
id 3
label "Node 3"
pos [ x 1 y 3 ]
]
edge [
source 1
target 2
label "Edge from node 1 to node 2"
color [line "blue" thickness 3]
]
edge [
source 2
target 3
label "Edge from node 2 to node 3"
]
edge [
source 3
target 1 label
"Edge from node 3 to node 1"
]
]
"""
def test_parse_gml(self):
G=networkx.parse_gml(self.simple_data,relabel=True)
assert_equals(sorted(G.nodes()),\
['Node 1', 'Node 2', 'Node 3'])
assert_equals( [e for e in sorted(G.edges())],\
[('Node 1', 'Node 2'),
('Node 2', 'Node 3'),
('Node 3', 'Node 1')])
assert_equals( [e for e in sorted(G.edges(data=True))],\
[('Node 1', 'Node 2',
{'color': {'line': 'blue', 'thickness': 3},
'label': 'Edge from node 1 to node 2'}),
('Node 2', 'Node 3',
{'label': 'Edge from node 2 to node 3'}),
('Node 3', 'Node 1',
{'label': 'Edge from node 3 to node 1'})])
def test_read_gml(self):
import os,tempfile
(fd,fname)=tempfile.mkstemp()
fh=open(fname,'w')
fh.write(self.simple_data)
fh.close()
Gin=networkx.read_gml(fname,relabel=True)
G=networkx.parse_gml(self.simple_data,relabel=True)
assert_equals( sorted(G.nodes(data=True)), sorted(Gin.nodes(data=True)))
assert_equals( sorted(G.edges(data=True)), sorted(Gin.edges(data=True)))
os.close(fd)
os.unlink(fname)
def test_relabel_dupliate(self):
data="""
graph
[
label ""
directed 1
node
[
id 0
label "same"
]
node
[
id 1
label "same"
]
]
"""
fh = io.BytesIO(data.encode('UTF-8'))
fh.seek(0)
assert_raises(networkx.NetworkXError,networkx.read_gml,fh,relabel=True)
| bsd-2-clause |
kirel/political-affiliation-prediction | topicmodel.py | 2 | 3883 | # -*- coding: utf-8 -*-
from sklearn.cluster import KMeans
import classifier
from scipy import zeros,double
import datetime
import json
import cPickle
class Topicmodel():
'''
Wrapper class for different topic models
'''
def __init__(self,folder='model',modeltype='kmeans',topics=100,topwords=10):
# the classifier, which also contains the trained BoW transformer
self.bow = cPickle.load(open(folder+'/BoW_transformer.pickle'))
self.folder = folder
self.modeltype = modeltype
self.topics = topics
self.topwords = topwords
if self.modeltype is 'kmeans':
from sklearn.cluster import KMeans
self.model = KMeans(n_clusters=topics,n_init=50)
if self.modeltype is 'kpcakmeans':
from sklearn.cluster import KMeans
from sklearn.decomposition import KernelPCA
self.model = {'kpca':KernelPCA(kernel='rbf',gamma=.1),\
'kmeans':KMeans(n_clusters=topics,n_init=50)}
if self.modeltype is 'nmf':
from sklearn.decomposition import NMF
self.model = NMF(n_components=topics)
def fit(self,X):
'''
fits a topic model
INPUT
X list of strings
'''
# transform list of strings into sparse BoW matrix
X = self.bow['tfidf_transformer'].fit_transform(\
self.bow['count_vectorizer'].fit_transform(X))
# transform word to BoW index into reverse lookup table
words = self.bow['count_vectorizer'].vocabulary_.values()
wordidx = self.bow['count_vectorizer'].vocabulary_.keys()
self.idx2word = dict(zip(words,wordidx))
# depending on the model, train
if self.modeltype is 'kmeans':
Xc = self.model.fit_predict(X)
if self.modeltype is 'kpcakmeans':
Xc = self.model['kpca'].fit_transform(X)
Xc = self.model['kmeans'].fit_predict(Xc)
if self.modeltype is 'nmf':
Xc = self.model.fit_transform(X).argmax(axis=0)
# for each cluster/topic compute covariance of word with cluster label
# this measure is indicative of the importance of the word for the topic
ass = zeros(self.topics)
self.topicstats = []
for cluster in range(self.topics):
# this is a binary vector, true if a data point was in this cluster
y = double(Xc==cluster)
# this is the covariance of the data with the cluster label
Xcov = X.T.dot(y)
# find the most strongly covarying (with the cluster label) words
wordidx = reversed(Xcov.argsort()[-self.topwords:])
topicwords = dict([(self.idx2word[idx],Xcov[idx]) for idx in wordidx])
self.topicstats.append({'assignments':y.sum(),'clusterid':cluster,\
'words': topicwords})
print 'Topic %d: %3d Assignments '%(cluster,y.sum())\
+ 'Topwords: ' + ' '.join(topicwords.keys()[:10])
datestr = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
fn = self.folder+'/topicmodel-%s-'%self.modeltype +datestr+'.json'
print "Saving model stats to "+fn
open(fn,'wb').write(json.dumps(self.topicstats))
def predict(self,X):
'''
predicts cluster assignment from list of strings
INPUT
X list of strings
'''
if X is not list: X = [X]
X = self.bow['tfidf_transformer'].transform(\
self.bow['count_vectorizer'].transform(X))
if self.modeltype is 'kmeans':
return self.model.predict(X)
if self.modeltype is 'kpcakmeans':
return self.model['kmeans'].predict(self.model['kpca'].transform(X))
if self.modeltype is 'nmf':
return self.model.transform(X).argmax(axis=0)
| mit |
darwinex/DarwinexLabs | tools/Python/MetaTrader_Helpers/Report_Conversion/MT_TO_PYTHON/DWX_MT_TO_PYTHON_v2_RC4.py | 1 | 9361 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 04 12:01:07 2019
@author: Darwinex Labs (www.darwinex.com)
DWX_MT_TO_PYTHON_v2_RC4.py
Purpose:
This script enables traders knowledgable in Python to import
their Account History and Strategy Tester reports directly into
pandas dataframes.
Leverage the capabilities of Python to conduct more meaninful,
sophisitcated analyses of your track records and backtests.
Dependencies:
- Python 3.6+
- BeautifulSoup 4 (bs4)
- pandas (Python Data Analysis Library)
- numpy (Scientific Computing with Python)
Notes:
The script isolates certain structural nuances of MetaTrader's HTML report,
specified in the __init__() function.
These are at the mercy of MetaTrader, and should these change in future, the
corresponding variables in the script will require adjustments accordingly.
Tested with:
MetaTrader 4 Build 1170 (20 Dec 2018)
Usage:
1) Set _type = 'normal' for Account Histories saves as "Normal" Reports
2) Set _type = 'detailed' for Account Histories saves as "Detailed" Reports
3) Set _type = 'backtest' for Strategy Tester reports
By default, dataframes generated are stored inside a class variable
called '_statement_df'. Set _verbose to True to print this upon generation.
"""
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
# Set pandas options
pd.set_option('display.width', 1000)
pd.set_option('display.max_columns', 500)
class DWX_MT_TO_PYTHON():
def __init__(self, _verbose=False,
_type='normal',
_filename='<INSERT_PATH_TO_HTML_REPORT_HERE>'):
#############################
# MetaTrader HTML Variables #
#############################
self.STATEMENT_HEADER = ['ticket','open_time','type','size','item',
'open_price','sl','tp','close_time',
'close_price','commission','taxes',
'swap','profit','magic','comment']
self.BACKTEST_HEADER = ['id','open_time','type','order','size',
'open_price','sl','tp','profit','balance']
self.TRADE_FIELDS = [(self.STATEMENT_HEADER[_r],_r) for _r in range(0,14)]
self.BALANCE_FIELDS = [('ticket',0),
('open_time',1),
('type',2),
('item',3),
('profit',4)]
self.COMMENT_FIELDS = [('magic',1),
('comment',2)]
# Variables to sanitize
self.ST_NUMS_TO_SANITIZE = ['ticket','size','open_price','close_price',
'commission','taxes','swap',
'profit','sl','tp','magic']
self.BT_NUMS_TO_SANITIZE = ['id','order','size','open_price',
'sl','tp','profit','balance']
self.ST_DATES_TO_SANITIZE = ['open_time','close_time']
self.BT_DATES_TO_SANITIZE = ['open_time']
# Number of rows to exclude depending on statement type (normal or detailed)
self.NORMAL_TRUNC = 15
self.DETAILED_TRUNC = 29
self.BACKTEST_TRUNC = 0
# Backtest rows differ as follows:
self.BT_ROW_NO_PROFIT_LEN = 8
self.BT_ROW_WITH_PROFIT_LEN = 10
#####################################
# Store DataFrame for other methods #
#####################################
self._statement_df = None
self._backtest_df = None
self._verbose = _verbose
# Transform input file into pandas dataframe
print(self._statement_to_dataframe_(_type=_type,
_filename=_filename))
print('\n[INFO] Data stored in self._statement_df.')
##########################################################################
def _statement_to_dataframe_(self, _type='normal',
_filename='<INSERT_PATH_TO_HTML_REPORT_HERE>'):
# Check if input type is correct, else return error
if _type not in ['backtest','normal','detailed']:
print('[KERNEL] Invalid input file -> must be one of backtest, normal or detailed.')
return None
try:
with open (_filename, "r") as myfile:
s=myfile.read()
except FileNotFoundError:
print('[ERROR] No such file exists!')
return None
# Invoke HTML Parser
_soup = BeautifulSoup(s, 'html.parser')
if _type.lower() == 'normal':
_table = _soup.find_all('table')[0]
_trunc = self.NORMAL_TRUNC
elif _type.lower() == 'detailed':
_table = _soup.find_all('table')[0]
_trunc = self.DETAILED_TRUNC
elif _type.lower() == 'backtest':
_table = _soup.find_all('table')[1]
_trunc = self.BACKTEST_TRUNC
else:
print('[ERROR] Unrecognized statement type.. must be backtest, normal or detailed.')
return None
# Count number of rows in track record (ignore last 14)
_x = (len(_table.findAll('tr')) - _trunc)
# Extract rows
_rows = _table.findAll('tr')
if _type == 'backtest':
_rows = _rows[1:_x]
# Create dict DB with empty lists (for dataframe later)
_dict = {_c: [np.nan for _l in range(len(_rows))] for _c in self.BACKTEST_HEADER}
else:
_rows = _rows[3:_x]
# Create dict DB with empty lists (for dataframe later)
_dict = {_c: [np.nan for _l in range(len(_rows))] for _c in self.STATEMENT_HEADER}
# Initialize row counter
_row_counter = 0
for _row in _rows:
# Extract values
_values = _row.findAll('td')
if _type != 'backtest':
#######################################
# Balance record (deposit/withdrawal) #
#######################################
if len(_row) == len(self.BALANCE_FIELDS):
for _f in self.BALANCE_FIELDS:
_dict[_f[0]][_row_counter] = _values[_f[1]].getText()
################
# Trade record #
################
elif len(_row) == len(self.TRADE_FIELDS):
for _f in self.TRADE_FIELDS:
_dict[_f[0]][_row_counter] = _values[_f[1]].getText()
###################
# Comment / Magic #
###################
elif len(_row) == len(self.COMMENT_FIELDS)+1:
# Update previous trade's comment/magic fields
for _f in self.COMMENT_FIELDS:
_dict[_f[0]][_row_counter-1] = _values[_f[1]].getText()
else:
print('[ERROR] Cannot recognize row structure.. please check HTML report to confirm if anything has changed?')
return None
else:
if len(_values) == self.BT_ROW_WITH_PROFIT_LEN:
_iter_range = range(self.BT_ROW_WITH_PROFIT_LEN)
else:
_iter_range = range(self.BT_ROW_NO_PROFIT_LEN)
for _i in _iter_range:
_dict[self.BACKTEST_HEADER[_i]][_row_counter] = _values[_i].getText()
# Update for next iteration
_row_counter += 1
# Create dataframe
_df = pd.DataFrame(data=_dict).dropna(how='all')
# Sanitize data types
if _type != 'backtest':
for _n in self.ST_NUMS_TO_SANITIZE:
_df[_n] = pd.to_numeric(_df[_n].str.replace(' ',''))
for _d in self.ST_DATES_TO_SANITIZE:
_df[_d] = pd.to_datetime(_df[_d])
# Save locally for future use
self._statement_df = _df
else:
for _n in self.BT_NUMS_TO_SANITIZE:
_df[_n] = pd.to_numeric(_df[_n].str.replace(' ',''))
for _d in self.BT_DATES_TO_SANITIZE:
_df[_d] = pd.to_datetime(_df[_d])
# Save locally for future use
self._backtest_df = _df
# Return sanitized dataframe
if self._verbose == True:
return _df
##########################################################################
| bsd-3-clause |
AnasGhrab/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 130 | 50966 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
# Integer codes for the learning-rate schedules understood by the Cython
# routines in ``sgd_fast`` (pa1/pa2 are the passive-aggressive variants).
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "pa1": 4, "pa2": 5}
# Integer codes for the regularization penalties understood by ``sgd_fast``.
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
    """Base class for SGD classification and regression.

    Stores the hyper-parameters shared by the classifier and regressor
    subclasses and provides validation and parameter-allocation helpers.
    Concrete subclasses must implement :meth:`fit`.
    """
    def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 warm_start=False, average=False):
        # Hyper-parameters are stored verbatim (scikit-learn convention),
        # then validated eagerly so bad values fail at construction time.
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.warm_start = warm_start
        self.average = average
        self._validate_params()
        self.coef_ = None
        if self.average > 0:
            # Averaged SGD tracks both the running ("standard") weights and
            # their running average; the arrays are allocated at fit time.
            self.standard_coef_ = None
            self.average_coef_ = None
        # iteration count for learning rate schedule
        # must not be int (e.g. if ``learning_rate=='optimal'``)
        self.t_ = None
    def set_params(self, *args, **kwargs):
        # Re-validate after any change made through the public
        # ``set_params`` API, mirroring the eager check in ``__init__``.
        super(BaseSGD, self).set_params(*args, **kwargs)
        self._validate_params()
        return self
    @abstractmethod
    def fit(self, X, y):
        """Fit model."""
    def _validate_params(self):
        """Validate input params.

        Raises ``ValueError`` if any hyper-parameter is outside its legal
        range or names an unknown loss/penalty/learning-rate schedule.
        """
        if not isinstance(self.shuffle, bool):
            raise ValueError("shuffle must be either True or False")
        if self.n_iter <= 0:
            raise ValueError("n_iter must be > zero")
        if not (0.0 <= self.l1_ratio <= 1.0):
            raise ValueError("l1_ratio must be in [0, 1]")
        if self.alpha < 0.0:
            raise ValueError("alpha must be >= 0")
        if self.learning_rate in ("constant", "invscaling"):
            # Only these schedules use eta0; 'optimal' ignores it, so a
            # zero eta0 is fine there.
            if self.eta0 <= 0.0:
                raise ValueError("eta0 must be > 0")
        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)
        if self.loss not in self.loss_functions:
            raise ValueError("The loss %s is not supported. " % self.loss)
    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``. """
        try:
            loss_ = self.loss_functions[loss]
            loss_class, args = loss_[0], loss_[1:]
            if loss in ('huber', 'epsilon_insensitive',
                        'squared_epsilon_insensitive'):
                # Epsilon-parameterised losses use the user-supplied value
                # instead of the table default.
                args = (self.epsilon, )
            return loss_class(*args)
        except KeyError:
            raise ValueError("The loss %s is not supported. " % loss)
    def _get_learning_rate_type(self, learning_rate):
        # Map schedule name to the integer code expected by sgd_fast.
        try:
            return LEARNING_RATE_TYPES[learning_rate]
        except KeyError:
            raise ValueError("learning rate %s "
                             "is not supported. " % learning_rate)
    def _get_penalty_type(self, penalty):
        # Map penalty name (case-insensitive) to the integer code expected
        # by sgd_fast.
        penalty = str(penalty).lower()
        try:
            return PENALTY_TYPES[penalty]
        except KeyError:
            raise ValueError("Penalty %s is not supported. " % penalty)
    def _validate_sample_weight(self, sample_weight, n_samples):
        """Set the sample weight array."""
        if sample_weight is None:
            # uniform sample weights
            sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
        else:
            # user-provided array
            sample_weight = np.asarray(sample_weight, dtype=np.float64,
                                       order="C")
        if sample_weight.shape[0] != n_samples:
            raise ValueError("Shapes of X and sample_weight do not match.")
        return sample_weight
    def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                intercept_init=None):
        """Allocate mem for parameters; initialize if provided.

        For ``n_classes > 2`` the coef matrix is (n_classes, n_features);
        the binary case flattens to a 1-d coef of length n_features.
        """
        if n_classes > 2:
            # allocate coef_ for multi-class
            if coef_init is not None:
                coef_init = np.asarray(coef_init, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided ``coef_`` does not match dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros((n_classes, n_features),
                                      dtype=np.float64, order="C")
            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, order="C")
                if intercept_init.shape != (n_classes, ):
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                           order="C")
        else:
            # allocate coef_ for binary problem
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=np.float64,
                                       order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not "
                                     "match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features,
                                      dtype=np.float64,
                                      order="C")
            # allocate intercept_ for binary problem
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=np.float64)
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init.reshape(1,)
            else:
                self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
        # initialize average parameters
        if self.average > 0:
            # NOTE: standard_coef_ intentionally aliases coef_ here — the
            # Cython routines update it in place.
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = np.zeros(self.coef_.shape,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
                                               dtype=np.float64,
                                               order="C")
def _make_dataset(X, y_i, sample_weight):
    """Wrap the input in the ``Dataset`` abstraction used by sgd_fast.

    Returns ``(dataset, intercept_decay)``; sparse inputs get a damped
    intercept update (``SPARSE_INTERCEPT_DECAY``) to avoid oscillation,
    dense inputs use the undamped factor 1.0.
    """
    # Dense input: wrap the array directly, no intercept damping needed.
    if not sp.issparse(X):
        return ArrayDataset(X, y_i, sample_weight), 1.0
    # Sparse input: hand the CSR internals to the Cython dataset.
    dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
    return dataset, SPARSE_INTERCEPT_DECAY
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.
    The i'th class is considered the "positive" class.

    Returns the trained ``(coef, intercept)`` for this one-vs-all
    sub-problem. When ``est.average`` is set, the averaged intercept is
    additionally written back into ``est.average_intercept_``.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)
    # XXX should have random_state_!
    random_state = check_random_state(est.random_state)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(0, np.iinfo(np.int32).max)
    if not est.average:
        # Plain SGD. Positional argument order must match the Cython
        # signature in sgd_fast exactly.
        return plain_sgd(coef, intercept, est.loss_function,
                         penalty_type, alpha, C, est.l1_ratio,
                         dataset, n_iter, int(est.fit_intercept),
                         int(est.verbose), int(est.shuffle), seed,
                         pos_weight, neg_weight,
                         learning_rate_type, est.eta0,
                         est.power_t, est.t_, intercept_decay)
    else:
        # Averaged SGD returns both the running and averaged parameters;
        # the coef arrays are updated in place via the views prepared above.
        standard_coef, standard_intercept, average_coef, \
            average_intercept = average_sgd(coef, intercept, average_coef,
                                            average_intercept,
                                            est.loss_function, penalty_type,
                                            alpha, C, est.l1_ratio, dataset,
                                            n_iter, int(est.fit_intercept),
                                            int(est.verbose), int(est.shuffle),
                                            seed, pos_weight, neg_weight,
                                            learning_rate_type, est.eta0,
                                            est.power_t, est.t_,
                                            intercept_decay,
                                            est.average)
        if len(est.classes_) == 2:
            # Binary case keeps its single averaged intercept at index 0.
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept
        return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
                                           LinearClassifierMixin)):
    """Abstract base for linear classifiers trained with SGD.

    Implements the shared binary and one-vs-all (OvA) multiclass fitting
    machinery; concrete subclasses such as ``SGDClassifier`` only
    customise the constructor.
    """
    # Mapping from loss name to (LossFunction class, default ctor args);
    # see BaseSGD._get_loss_function for how epsilon losses are handled.
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }
    @abstractmethod
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):
        super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
                                                alpha=alpha, l1_ratio=l1_ratio,
                                                fit_intercept=fit_intercept,
                                                n_iter=n_iter, shuffle=shuffle,
                                                verbose=verbose,
                                                epsilon=epsilon,
                                                random_state=random_state,
                                                learning_rate=learning_rate,
                                                eta0=eta0, power_t=power_t,
                                                warm_start=warm_start,
                                                average=average)
        self.class_weight = class_weight
        self.classes_ = None
        self.n_jobs = int(n_jobs)
    def _partial_fit(self, X, y, alpha, C,
                     loss, learning_rate, n_iter,
                     classes, sample_weight,
                     coef_init, intercept_init):
        # Shared implementation behind both ``fit`` and ``partial_fit``.
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape
        self._validate_params()
        _check_partial_fit_first_call(self, classes)
        n_classes = self.classes_.shape[0]
        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(self.class_weight,
                                                           self.classes_, y)
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)
        if self.coef_ is None or coef_init is not None:
            self._allocate_parameter_mem(n_classes, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous data %d."
                             % (n_features, self.coef_.shape[-1]))
        self.loss_function = self._get_loss_function(loss)
        if self.t_ is None:
            # First call: start the learning-rate clock at 1.0 (kept float
            # for the 'optimal' schedule).
            self.t_ = 1.0
        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(X, y, alpha=alpha, C=C,
                                 learning_rate=learning_rate,
                                 sample_weight=sample_weight, n_iter=n_iter)
        elif n_classes == 2:
            self._fit_binary(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight, n_iter=n_iter)
        else:
            raise ValueError("The number of class labels must be "
                             "greater than one.")
        return self
    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        if hasattr(self, "classes_"):
            # Forget classes from a previous fit; they are re-derived below.
            self.classes_ = None
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape
        # labels can be encoded as float, int, or string literals
        # np.unique sorts in asc order; largest class id is positive class
        classes = np.unique(y)
        if self.warm_start and self.coef_ is not None:
            # Warm start: previous solution seeds the optimization unless
            # explicit initial values were passed.
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None
        if self.average > 0:
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = None
            self.average_intercept_ = None
        # Clear iteration count for multiple call to fit.
        self.t_ = None
        self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
                          classes, sample_weight, coef_init, intercept_init)
        return self
    def _fit_binary(self, X, y, alpha, C, sample_weight,
                    learning_rate, n_iter):
        """Fit a binary classifier on X and y. """
        # Class index 1 is the positive class (np.unique sorts ascending).
        coef, intercept = fit_binary(self, 1, X, y, alpha, C,
                                     learning_rate, n_iter,
                                     self._expanded_class_weight[1],
                                     self._expanded_class_weight[0],
                                     sample_weight)
        self.t_ += n_iter * X.shape[0]
        # need to be 2d
        if self.average > 0:
            # Expose the averaged weights only once enough samples have
            # been seen (average <= total updates so far).
            if self.average <= self.t_ - 1:
                self.coef_ = self.average_coef_.reshape(1, -1)
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_.reshape(1, -1)
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_ = coef.reshape(1, -1)
            # intercept is a float, need to convert it to an array of length 1
            self.intercept_ = np.atleast_1d(intercept)
    def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                        sample_weight, n_iter):
        """Fit a multi-class classifier by combining binary classifiers
        Each binary classifier predicts one class versus all others. This
        strategy is called OVA: One Versus All.
        """
        # Use joblib to fit OvA in parallel.
        # The threading backend lets fit_binary mutate self.coef_ rows and
        # self.average_intercept_ in place across workers.
        result = Parallel(n_jobs=self.n_jobs, backend="threading",
                          verbose=self.verbose)(
            delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                                n_iter, self._expanded_class_weight[i], 1.,
                                sample_weight)
            for i in range(len(self.classes_)))
        for i, (_, intercept) in enumerate(result):
            self.intercept_[i] = intercept
        self.t_ += n_iter * X.shape[0]
        if self.average > 0:
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of the training data
        y : numpy array, shape (n_samples,)
            Subset of the target values
        classes : array, shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.
        Returns
        -------
        self : returns an instance of self.
        """
        # 'balanced'/'auto' need the full label distribution, which a
        # single mini-batch cannot provide — reject early.
        if self.class_weight in ['balanced', 'auto']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights, "
                             "use compute_class_weight('{0}', classes, y). "
                             "In place of y you can us a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.".format(self.class_weight))
        return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 classes=classes, sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)
    def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data
        y : numpy array, shape (n_samples,)
            Target values
        coef_init : array, shape (n_classes, n_features)
            The initial coefficients to warm-start the optimization.
        intercept_init : array, shape (n_classes,)
            The initial intercept to warm-start the optimization.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed. These weights will
            be multiplied with class_weight (passed through the
            contructor) if class_weight is specified
        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init, intercept_init=intercept_init,
                         sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
    """Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
    This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    each sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning, see the partial_fit method.
    For best results using the default learning rate schedule, the data should
    have zero mean and unit variance.
    This implementation works with data represented as dense or sparse arrays
    of floating point values for the features. The model it fits can be
    controlled with the loss parameter; by default, it fits a linear support
    vector machine (SVM).
    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.
    Read more in the :ref:`User Guide <sgd>`.
    Parameters
    ----------
    loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
                'perceptron', or a regression loss: 'squared_loss', 'huber',\
                'epsilon_insensitive', or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'hinge', which gives a
        linear SVM.
        The 'log' loss gives logistic regression, a probabilistic classifier.
        'modified_huber' is another smooth loss that brings tolerance to
        outliers as well as probability estimates.
        'squared_hinge' is like hinge but is quadratically penalized.
        'perceptron' is the linear loss used by the perceptron algorithm.
        The other losses are designed for regression but can be useful in
        classification as well; see SGDRegressor for a description.
    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.
    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001
    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.
    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.
    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.
    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.
    verbose : integer, optional
        The verbosity level
    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.
    n_jobs : integer, optional
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation. -1 means 'all CPUs'. Defaults
        to 1.
    learning_rate : string, optional
        The learning rate schedule:
        constant: eta = eta0
        optimal: eta = 1.0 / (t + t0) [default]
        invscaling: eta = eta0 / pow(t, power_t)
        where t0 is chosen by a heuristic proposed by Leon Bottou.
    eta0 : double
        The initial learning rate for the 'constant' or 'invscaling'
        schedules. The default value is 0.0 as eta0 is not used by the
        default schedule 'optimal'.
    power_t : double
        The exponent for inverse scaling learning rate [default 0.5].
    class_weight : dict, {class_label: weight} or "balanced" or None, optional
        Preset for the class_weight fit parameter.
        Weights associated with classes. If not given, all classes
        are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.
    Attributes
    ----------
    coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
            n_features)
        Weights assigned to the features.
    intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> Y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.SGDClassifier()
    >>> clf.fit(X, Y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
            eta0=0.0, fit_intercept=True, l1_ratio=0.15,
            learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
            penalty='l2', power_t=0.5, random_state=None, shuffle=True,
            verbose=0, warm_start=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    LinearSVC, LogisticRegression, Perceptron
    """
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
                 epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 class_weight=None, warm_start=False, average=False):
        # Pure pass-through to the abstract base; kept concrete so the
        # class is instantiable.
        super(SGDClassifier, self).__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
            verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, class_weight=class_weight, warm_start=warm_start,
            average=average)
    def _check_proba(self):
        # Probability estimates exist only for losses with a probabilistic
        # interpretation; also requires a fitted model (t_ set by fit).
        check_is_fitted(self, "t_")
        if self.loss not in ("log", "modified_huber"):
            raise AttributeError("probability estimates are not available for"
                                 " loss=%r" % self.loss)
    @property
    def predict_proba(self):
        """Probability estimates.
        This method is only available for log loss and modified Huber loss.
        Multiclass probability estimates are derived from binary (one-vs.-rest)
        estimates by simple normalization, as recommended by Zadrozny and
        Elkan.
        Binary probability estimates for loss="modified_huber" are given by
        (clip(decision_function(X), -1, 1) + 1) / 2.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        References
        ----------
        Zadrozny and Elkan, "Transforming classifier scores into multiclass
        probability estimates", SIGKDD'02,
        http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
        The justification for the formula in the loss="modified_huber"
        case is in the appendix B in:
        http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
        """
        # Implemented as a property so that the AttributeError from
        # _check_proba surfaces on attribute access, per sklearn convention.
        self._check_proba()
        return self._predict_proba
    def _predict_proba(self, X):
        if self.loss == "log":
            return self._predict_proba_lr(X)
        elif self.loss == "modified_huber":
            binary = (len(self.classes_) == 2)
            scores = self.decision_function(X)
            if binary:
                # Column 1 holds P(positive); column 0 is filled below.
                prob2 = np.ones((scores.shape[0], 2))
                prob = prob2[:, 1]
            else:
                prob = scores
            # (clip(score, -1, 1) + 1) / 2, written in place into ``prob``.
            np.clip(scores, -1, 1, prob)
            prob += 1.
            prob /= 2.
            if binary:
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = (prob_sum == 0)
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)
                # normalize
                prob /= prob_sum.reshape((prob.shape[0], -1))
            return prob
        else:
            raise NotImplementedError("predict_(log_)proba only supported when"
                                      " loss='log' or loss='modified_huber' "
                                      "(%r given)" % self.loss)
    @property
    def predict_log_proba(self):
        """Log of probability estimates.
        This method is only available for log loss and modified Huber loss.
        When loss="modified_huber", probability estimates may be hard zeros
        and ones, so taking the logarithm is not possible.
        See ``predict_proba`` for details.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        T : array-like, shape (n_samples, n_classes)
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.
        """
        self._check_proba()
        return self._predict_log_proba
    def _predict_log_proba(self, X):
        return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
    """Abstract base for linear regressors trained with SGD.

    Provides the shared fitting/prediction machinery; concrete subclasses
    (e.g. ``SGDRegressor``) only customise the constructor.
    """
    # Mapping from loss name to (LossFunction class, default ctor args).
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }
    @abstractmethod
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                               alpha=alpha, l1_ratio=l1_ratio,
                                               fit_intercept=fit_intercept,
                                               n_iter=n_iter, shuffle=shuffle,
                                               verbose=verbose,
                                               epsilon=epsilon,
                                               random_state=random_state,
                                               learning_rate=learning_rate,
                                               eta0=eta0, power_t=power_t,
                                               warm_start=warm_start,
                                               average=average)
    def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                     n_iter, sample_weight,
                     coef_init, intercept_init):
        # Shared implementation behind both ``fit`` and ``partial_fit``.
        X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
        y = astype(y, np.float64, copy=False)
        n_samples, n_features = X.shape
        self._validate_params()
        # Allocate datastructures from input arguments
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)
        if self.coef_ is None:
            # Regression is a single-output problem: one coef row.
            self._allocate_parameter_mem(1, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous data %d."
                             % (n_features, self.coef_.shape[-1]))
        if self.average > 0 and self.average_coef_ is None:
            self.average_coef_ = np.zeros(n_features,
                                          dtype=np.float64,
                                          order="C")
            self.average_intercept_ = np.zeros(1,
                                               dtype=np.float64,
                                               order="C")
        self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                            sample_weight, n_iter)
        return self
    def partial_fit(self, X, y, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of training data
        y : numpy array of shape (n_samples,)
            Subset of target values
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.
        Returns
        -------
        self : returns an instance of self.
        """
        return self._partial_fit(X, y, self.alpha, C=1.0,
                                 loss=self.loss,
                                 learning_rate=self.learning_rate, n_iter=1,
                                 sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)
    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        if self.warm_start and self.coef_ is not None:
            # Warm start: previous solution seeds the optimization unless
            # explicit initial values were passed.
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None
        if self.average > 0:
            self.standard_intercept_ = self.intercept_
            self.standard_coef_ = self.coef_
            self.average_coef_ = None
            self.average_intercept_ = None
        # Clear iteration count for multiple call to fit.
        self.t_ = None
        return self._partial_fit(X, y, alpha, C, loss, learning_rate,
                                 self.n_iter, sample_weight,
                                 coef_init, intercept_init)
    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data
        y : numpy array, shape (n_samples,)
            Target values
        coef_init : array, shape (n_features,)
            The initial coefficients to warm-start the optimization.
        intercept_init : array, shape (1,)
            The initial intercept to warm-start the optimization.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init,
                         intercept_init=intercept_init,
                         sample_weight=sample_weight)
    @deprecated(" and will be removed in 0.19.")
    def decision_function(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        return self._decision_function(X)
    def _decision_function(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
        X = check_array(X, accept_sparse='csr')
        # Linear prediction X @ coef + intercept, flattened to 1-d.
        scores = safe_sparse_dot(X, self.coef_.T,
                                 dense_output=True) + self.intercept_
        return scores.ravel()
    def predict(self, X):
        """Predict using the linear model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        """
        return self._decision_function(X)
    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, n_iter):
        # Drive the Cython SGD routines for the single-output regression
        # problem; positional argument order must match sgd_fast exactly.
        dataset, intercept_decay = _make_dataset(X, y, sample_weight)
        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)
        if self.t_ is None:
            self.t_ = 1.0
        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)
        if self.average > 0:
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_ =\
                average_sgd(self.standard_coef_,
                            self.standard_intercept_[0],
                            self.average_coef_,
                            self.average_intercept_[0],
                            loss_function,
                            penalty_type,
                            alpha, C,
                            self.l1_ratio,
                            dataset,
                            n_iter,
                            int(self.fit_intercept),
                            int(self.verbose),
                            int(self.shuffle),
                            seed,
                            1.0, 1.0,
                            learning_rate_type,
                            self.eta0, self.power_t, self.t_,
                            intercept_decay, self.average)
            # Intercepts come back as scalars; restore the length-1 arrays.
            self.average_intercept_ = np.atleast_1d(self.average_intercept_)
            self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
            self.t_ += n_iter * X.shape[0]
            # Expose averaged weights only once ``average`` samples seen.
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_, self.intercept_ = \
                plain_sgd(self.coef_,
                          self.intercept_[0],
                          loss_function,
                          penalty_type,
                          alpha, C,
                          self.l1_ratio,
                          dataset,
                          n_iter,
                          int(self.fit_intercept),
                          int(self.verbose),
                          int(self.shuffle),
                          seed,
                          1.0, 1.0,
                          learning_rate_type,
                          self.eta0, self.power_t, self.t_,
                          intercept_decay)
            self.t_ += n_iter * X.shape[0]
            self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
    """Linear model fitted by minimizing a regularized empirical loss with SGD

    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way
    with a decreasing strength schedule (aka learning rate).

    The regularizer is a penalty added to the loss function that shrinks
    model parameters towards the zero vector using either the squared
    euclidean norm L2 or the absolute norm L1 or a combination of both
    (Elastic Net). If the parameter update crosses the 0.0 value because of
    the regularizer, the update is truncated to 0.0 to allow for learning
    sparse models and achieve online feature selection.

    This implementation works with data represented as dense numpy arrays of
    floating point values for the features.

    Read more in the :ref:`User Guide <sgd>`.

    Parameters
    ----------
    loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
            or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'squared_loss' which refers
        to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
        focus less on getting outliers correct by switching from squared to
        linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
        errors less than epsilon and is linear past that; this is the loss
        function used in SVR. 'squared_epsilon_insensitive' is the same but
        becomes squared loss past a tolerance of epsilon.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose : integer, optional
        The verbosity level.

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current
        prediction and the correct label are ignored if they are less than
        this threshold.

    learning_rate : string, optional
        The learning rate:
        constant: eta = eta0
        optimal: eta = 1.0/(alpha * t)
        invscaling: eta = eta0 / pow(t, power_t) [default]

    eta0 : double, optional
        The initial learning rate [default 0.01].

    power_t : double, optional
        The exponent for inverse scaling learning rate [default 0.25].

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Weights assigned to the features.

    intercept_ : array, shape (1,)
        The intercept term.

    average_coef_ : array, shape (n_features,)
        Averaged weights assigned to the features.

    average_intercept_ : array, shape (1,)
        The averaged intercept term.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = linear_model.SGDRegressor()
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
           fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
           loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
           random_state=None, shuffle=True, verbose=0, warm_start=False)

    See also
    --------
    Ridge, ElasticNet, Lasso, SVR
    """
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        # Pure forwarding constructor: all behavior lives in BaseSGDRegressor.
        super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                           alpha=alpha, l1_ratio=l1_ratio,
                                           fit_intercept=fit_intercept,
                                           n_iter=n_iter, shuffle=shuffle,
                                           verbose=verbose,
                                           epsilon=epsilon,
                                           random_state=random_state,
                                           learning_rate=learning_rate,
                                           eta0=eta0, power_t=power_t,
                                           warm_start=warm_start,
                                           average=average)
| bsd-3-clause |
Arn-O/kadenze-deep-creative-apps | final-project/libs/utils.py | 1 | 2157 | """
Utility for image manipulation, directly copied (and slightly modified) from:
https://github.com/pkmital/CADL
"""
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
def img_crop(img, pos):
    """Crop a square window out of an image, starting at a column offset.

    Parameters
    ----------
    img : np.ndarray
        Input image to crop, assumed at least 2d.
    pos : int
        Start column of the square window.

    Returns
    -------
    crop : np.ndarray
        Square crop of side ``img.shape[0]`` taken along axis 1.
    """
    side = img.shape[0]
    columns = np.arange(pos, pos + side)
    # np.take raises IndexError if the window runs past the image edge,
    # which is the intended failure mode for a bad ``pos``.
    return np.take(img, columns, axis=1)
def build_gif(imgs, interval=0.1, dpi=72,
              save_gif=True, saveto='animation.gif',
              show_gif=False, cmap=None):
    """Take an array or list of images and create a GIF.

    Parameters
    ----------
    imgs : np.ndarray or list
        List of images to create a GIF of
    interval : float, optional
        Spacing in seconds between successive images.
    dpi : int, optional
        Dots per inch.
    save_gif : bool, optional
        Whether or not to save the GIF.
    saveto : str, optional
        Filename of GIF to save.
    show_gif : bool, optional
        Whether or not to render the GIF using plt.
    cmap : None, optional
        Optional colormap to apply to the images.

    Returns
    -------
    ani : matplotlib.animation.ArtistAnimation
        The artist animation from matplotlib.  Likely not useful.

    Notes
    -----
    Saving requires the ImageMagick writer to be installed (passed as
    ``writer='imagemagick'`` below).
    """
    imgs = np.asarray(imgs)
    # Unpack height/width; ``*c`` swallows an optional channel dimension.
    h, w, *c = imgs[0].shape
    # Size the figure so one image pixel maps to roughly one figure pixel,
    # and strip all margins/axes so only the image is visible.
    fig, ax = plt.subplots(figsize=(np.round(w / dpi), np.round(h / dpi)))
    fig.subplots_adjust(bottom=0)
    fig.subplots_adjust(top=1)
    fig.subplots_adjust(right=1)
    fig.subplots_adjust(left=0)
    ax.set_axis_off()

    # ArtistAnimation wants one list of artists per frame.
    if cmap is not None:
        axs = list(map(lambda x: [
            ax.imshow(x, cmap=cmap)], imgs))
    else:
        axs = list(map(lambda x: [
            ax.imshow(x)], imgs))

    # interval is given in seconds; matplotlib expects milliseconds.
    ani = animation.ArtistAnimation(
        fig, axs, interval=interval*1000, repeat_delay=0, blit=True)

    if save_gif:
        ani.save(saveto, writer='imagemagick', dpi=dpi)

    if show_gif:
        plt.show()

    return ani
| apache-2.0 |
akrherz/dep | scripts/plots/scenario_soil.py | 2 | 2595 | """Plot soil stuff."""
import os
import glob
import sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pyiem.dep import read_slp
from pyiem.util import utc
def read_data():
    """Scan WEPP soil (.sol) files for two scenarios over a fixed HUC12 list
    and dump the extracted erodibility (kr) values to /tmp/soils.csv.

    Side effects: changes the process working directory per HUC12/scenario
    and writes /tmp/soils.csv with columns ``kr`` and ``scenario``.
    """
    scenarios = []
    kr = []
    for huc12 in [
        "070600060701",
        "070801020905",
        "101702040703",
        "070600020602",
        "071000090201",
        "102300020202",
        "102300060407",
        "071000091101",
        "101702032003",
        "071000081503",
        "102300031404",
        "070801021001",
        "101702031903",
        "071000070402",
        "102400030501",
        "070600040401",
        "102300030402",
        "070802050304",
        "070801060604",
        "071000061401",
        "102802010402",
        "102400090404",
        "070802070602",
        "071000050703",
        "070802090401",
        "102400050501",
        "070802060403",
    ]:
        for scenario in [0, 1002]:
            os.chdir("/i/%s/sol/%s/%s" % (scenario, huc12[:8], huc12[8:]))
            for fn in glob.glob("*.sol"):
                # Bug fix: the original used ``open(fn).readlines()`` which
                # leaks a file handle per .sol file; use a context manager.
                with open(fn) as fh:
                    lines = fh.readlines()
                # this fails sometimes (line 4 does not always have the
                # expected token layout)
                kr.append(float(lines[3].split()[-3]))
                if kr[-1] > 0.1:
                    # Suspiciously large kr: print the value and its path.
                    print(kr[-1])
                    print(
                        "/i/%s/sol/%s/%s/%s"
                        % (scenario, huc12[:8], huc12[8:], fn)
                    )
                scenarios.append(scenario)
    df = pd.DataFrame({"kr": kr, "scenario": scenarios})
    df.to_csv("/tmp/soils.csv", index=False)
def main():
    """Go Main Go: rebuild /tmp/soils.csv and plot cumulative kr histograms
    per scenario to /tmp/test.png."""
    # huc12 = argv[1]
    read_data()
    fig, ax = plt.subplots(1, 1)
    df = pd.read_csv("/tmp/soils.csv")
    for scenario, gdf in df.groupby("scenario"):
        print(gdf["kr"].describe())
        # Cumulative, normalized step histogram of kr per scenario.
        gdf["kr"].plot.hist(
            bins=500,
            cumulative=True,
            density=1,
            ax=ax,
            label="%s avg: %.4f" % (scenario, gdf["kr"].mean()),
            histtype="step",
        )
        # gdf["maxslope"].plot.hist(
        #    bins=500, cumulative=True, density=1, ax=ax,
        #    label="%s peak, avg: %.1f%%" % (scenario, gdf['maxslope'].mean()),
        #    histtype='step')
    ax.set_yticks(np.arange(0, 1.01, 0.1))
    ax.set_xlim(0, 0.012)
    ax.grid(True)
    ax.legend(loc=4, ncol=2)
    # NOTE(review): the title/xlabel talk about flowpath lengths but the data
    # plotted is soil erodibility (kr) -- looks copy-pasted; confirm.
    ax.set_title("Length Comparison between new and old flowpaths")
    ax.set_xlabel("Length (m), generated %s" % (utc().strftime("%d %b %Y"),))
    fig.savefig("/tmp/test.png")


if __name__ == "__main__":
    main()
| mit |
JT5D/scikit-learn | sklearn/tests/test_base.py | 9 | 5815 |
# Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
    # Minimal fixture: one numeric parameter and one that may hold an
    # (empty) array, used by the clone tests below.
    def __init__(self, l1=0, empty=None):
        self.l1 = l1
        self.empty = empty
class K(BaseEstimator):
    # Two-parameter fixture used as a nested sub-estimator of T.
    def __init__(self, c=None, d=None):
        self.c = c
        self.d = d
class T(BaseEstimator):
    # Two-parameter fixture whose parameters are themselves estimators in
    # the nested get_params/set_params tests.
    def __init__(self, a=None, b=None):
        self.a = a
        self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
    # Fixture with a deprecated parameter ``b`` that aliases ``a``; used to
    # check that deprecated attributes do not show up in get_params().
    def __init__(self, a=None, b=None):
        self.a = a
        if b is not None:
            # Bug fix: the original merely *constructed* a DeprecationWarning
            # object without ever emitting it; actually warn the caller.
            import warnings
            warnings.warn("b is deprecated and renamed 'a'",
                          DeprecationWarning)
            self.a = b

    @property
    @deprecated("Parameter 'b' is deprecated and renamed to 'a'")
    def b(self):
        return self._b
class Buggy(BaseEstimator):
    """A buggy estimator that does not set its parameters right."""

    def __init__(self, a=None):
        # Deliberately ignores ``a`` so that clone() detects the mismatch.
        self.a = 1
class NoEstimator(object):
    # Not a BaseEstimator and exposes no get_params; clone() must reject it.
    def __init__(self):
        pass

    def fit(self, X=None, y=None):
        return self

    def predict(self, X=None):
        return None
class VargEstimator(BaseEstimator):
    """Sklearn estimators shouldn't have vargs."""
    # *vargs makes the constructor un-introspectable, so clone() must fail.
    def __init__(self, *vargs):
        pass
#############################################################################
# The tests
def test_clone():
    """Tests that clone creates a correct deep copy.

    We create an estimator, make a copy of its original state
    (which, in this case, is the current state of the estimator),
    and check that the obtained copy is a correct deep copy.
    """
    from sklearn.feature_selection import SelectFpr, f_classif

    selector = SelectFpr(f_classif, alpha=0.1)
    new_selector = clone(selector)
    # A clone is a distinct object with identical parameters.
    assert_true(selector is not new_selector)
    assert_equal(selector.get_params(), new_selector.get_params())

    # Array-valued parameters must also survive cloning.
    selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    new_selector = clone(selector)
    assert_true(selector is not new_selector)
def test_clone_2():
    """Tests that clone doesn't copy everything.

    We first create an estimator, give it an own attribute, and
    make a copy of its original state. Then we check that the copy doesn't
    have the specific attribute we manually added to the initial estimator.
    """
    from sklearn.feature_selection import SelectFpr, f_classif

    selector = SelectFpr(f_classif, alpha=0.1)
    selector.own_attribute = "test"
    new_selector = clone(selector)
    # Only constructor parameters are cloned, not ad-hoc attributes.
    assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
    """Check that clone raises an error on buggy estimators."""
    # Constructor that does not store its parameter -> RuntimeError.
    buggy = Buggy()
    buggy.a = 2
    assert_raises(RuntimeError, clone, buggy)

    # Object without get_params -> TypeError.
    no_estimator = NoEstimator()
    assert_raises(TypeError, clone, no_estimator)

    # Constructor with *vargs cannot be introspected -> RuntimeError.
    varg_est = VargEstimator()
    assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
    """Regression test for cloning estimators with empty arrays"""
    clf = MyEstimator(empty=np.array([]))
    clf2 = clone(clf)
    assert_array_equal(clf.empty, clf2.empty)

    # Sparse-matrix-valued parameters must survive cloning too.
    clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
    clf2 = clone(clf)
    assert_array_equal(clf.empty.data, clf2.empty.data)
def test_repr():
    """Smoke test the repr of the base estimator."""
    my_estimator = MyEstimator()
    repr(my_estimator)
    # Nested estimators are rendered recursively.
    test = T(K(), K())
    assert_equal(
        repr(test),
        "T(a=K(c=None, d=None), b=K(c=None, d=None))"
    )

    # Very long parameter values are truncated to a fixed-length repr.
    some_est = T(a=["long_params"] * 1000)
    assert_equal(len(repr(some_est)), 415)
def test_str():
    """Smoke test the str of the base estimator"""
    my_estimator = MyEstimator()
    str(my_estimator)
def test_get_params():
    # Nested parameters (double-underscore syntax) only appear with deep=True.
    test = T(K(), K())

    assert_true('a__d' in test.get_params(deep=True))
    assert_true('a__d' not in test.get_params(deep=False))

    test.set_params(a__d=2)
    assert_true(test.a.d == 2)
    # Setting an unknown nested parameter must raise.
    assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
    # deprecated attribute should not show up as params
    est = DeprecatedAttributeEstimator(a=1)

    assert_true('a' in est.get_params())
    assert_true('a' in est.get_params(deep=True))
    assert_true('a' in est.get_params(deep=False))

    assert_true('b' not in est.get_params())
    assert_true('b' not in est.get_params(deep=True))
    assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
    # is_classifier must see through meta-estimators (grid search, pipeline).
    svc = SVC()
    assert_true(is_classifier(svc))
    assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
    assert_true(is_classifier(Pipeline([('svc', svc)])))
    assert_true(is_classifier(Pipeline([('svc_cv',
                              GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
    # test nested estimator parameter setting
    clf = Pipeline([("svc", SVC())])
    # non-existing parameter in svc
    assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
    # non-existing parameter of pipeline
    assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
    # we don't currently catch if the things in pipeline are estimators
    #bad_pipeline = Pipeline([("bad", NoEstimator())])
    #assert_raises(AttributeError, bad_pipeline.set_params,
    #bad__stupid_param=True)
| bsd-3-clause |
aydevosotros/tradingMachine | DataManager.py | 1 | 1627 | #
# Copyright (c) 2015 by Antonio Molina García-Retamero. All Rights Reserved.
#
import requests
import datetime
import csv
import numpy as np
import matplotlib.pyplot as plt
class DataManager(object):
    """Fetch daily stock quotes from the (legacy) Yahoo Finance CSV endpoint
    and tag winning/losing days."""

    def __init__(self, arg):
        super(DataManager, self).__init__()
        self.arg = arg

    @staticmethod
    def getQuotesYahoo(symbol):
        """Download daily quotes for ``symbol`` from 2000-01-01 to today.

        Returns ``[dates, data]``, both ordered oldest-first; ``data`` rows
        hold the numeric CSV columns (Open, High, Low, Close, Volume,
        Adj Close).

        NOTE(review): this Yahoo endpoint was retired years ago and may no
        longer respond.
        """
        # Bug fix: call now() once so month/day/year come from the same
        # instant (the original called datetime.now() three times, which
        # could straddle midnight).
        now = datetime.datetime.now()
        url = "http://real-chart.finance.yahoo.com/table.csv?s={0}&a=00&b=1&c=2000&d={1}&e={2}&f={3}&g=d&ignore=.csv".format(
            symbol, now.month - 1, now.day, now.year)
        print(url)
        r = requests.get(url)
        content = r.text
        rows = content.split("\n")[1:-1]  # drop header row and trailing blank
        data = [[float(value) for value in row.split(',')[1:]] for row in rows]
        # Bug fix: extract the date *field* of each row; the original used
        # ``x[0]``, which is merely the first character of the row string.
        # Also reverse so dates align with the oldest-first data rows.
        dates = [row.split(',')[0] for row in rows][::-1]
        return [dates, data[::-1]]

    @staticmethod
    def byWiningDayTagging(quotes):
        """Tag each quote row 1 if Close > Open (winning day), else -1.

        Rows are [Open, High, Low, Close, ...] as produced by getQuotesYahoo.
        (Method name typo "Wining" kept for backward compatibility.)
        """
        tags = []
        for quote in quotes:
            if quote[3] - quote[0] > 0:
                tags.append(1)
            else:
                tags.append(-1)
        return tags
if __name__ == '__main__':
    # Quick manual check: download quotes, plot a column, print day tags.
    # data = DataManager.getQuotesYahoo("FB")
    data = DataManager.getQuotesYahoo("MSF")
    dates = data[0]
    data = data[1]
    print(len(data))
    fig = plt.figure()
    # Column 5 is presumably Adj Close (after the date column is stripped)
    # -- TODO confirm against Yahoo's CSV column order.
    plt.plot([x[5] for x in data])
    # plt.show()
    print(DataManager.byWiningDayTagging(data))
#Notes
## http://finance.yahoo.com/q/hp?s=FB&a=00&b=1&c=2008&d=06&e=31&f=2015&g=d&z=66&y=792
## http://real-chart.finance.yahoo.com/table.csv?s=FB&a=00&b=1&c=2008&d=06&e=31&f=2015&g=d&ignore=.csv
| gpl-2.0 |
shusenl/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
    # Test BayesianRidge on diabetes
    # NOTE: intentionally skipped (xfail); everything below the raise is
    # unreachable but kept as the would-be test body.
    raise SkipTest("XFailed Test")
    diabetes = datasets.load_diabetes()
    X, y = diabetes.data, diabetes.target

    clf = BayesianRidge(compute_score=True)

    # Test with more samples than features
    clf.fit(X, y)
    # Test that scores are increasing at each iteration
    assert_array_equal(np.diff(clf.scores_) > 0, True)

    # Test with more features than samples
    X = X[:5, :]
    y = y[:5]
    clf.fit(X, y)
    # Test that scores are increasing at each iteration
    assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
    # Test BayesianRidge on toy
    X = np.array([[1], [2], [6], [8], [10]])
    Y = np.array([1, 2, 6, 8, 10])
    clf = BayesianRidge(compute_score=True)
    clf.fit(X, Y)

    # Check that the model could approximately learn the identity function
    test = [[1], [3], [4]]
    assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
    # Test BayesianRegression ARD classifier
    X = np.array([[1], [2], [3]])
    Y = np.array([1, 2, 3])
    clf = ARDRegression(compute_score=True)
    clf.fit(X, Y)

    # Check that the model could approximately learn the identity function
    test = [[1], [3], [4]]
    assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
adamrvfisher/TechnicalAnalysisLibrary | CSVtoDFDiv.py | 1 | 1762 | # -*- coding: utf-8 -*-
"""
Created on Sun May 21 11:47:12 2017
@author: AmatVictoriaCuramIII
"""
from pandas import read_csv
import pandas as pd
import os
# Pass 1: read each CSV of quotes, index by parsed Date, drop duplicate
# columns/rows, and pickle it under a per-symbol directory.
# NOTE(review): paths are hard-coded Windows drive locations; the broad
# ``except OSError`` silently skips any unreadable/unwritable file.
CSVfiles = os.listdir('F:\\Users\\AmatVictoriaCuram\\TemporaryCSV')
ranger = range(0,len(CSVfiles))
for i in ranger:
    try:
        temp = read_csv('F:\\Users\\AmatVictoriaCuram\\TemporaryCSV\\' +
                        (CSVfiles[i]), sep = ',')
        temp = temp.set_index('Date')
        temp.index = pd.to_datetime(temp.index, format = "%Y/%m/%d")
        # Keep the first occurrence of any duplicated column or index entry.
        temp = temp.loc[:,~temp.columns.duplicated()]
        temp = temp[~temp.index.duplicated(keep='first')]
        # CSVfiles[i][:-7] strips the 7-char suffix to get the symbol folder;
        # CSVfiles[i][:-4] strips the '.csv' extension for the pickle name.
        if not os.path.exists('F:\\Users\\AmatVictoriaCuram\\Database\\' +
                              CSVfiles[i][:-7]):
            os.makedirs('F:\\Users\\AmatVictoriaCuram\\Database\\' +
                        CSVfiles[i][:-7])
        pd.to_pickle(temp, 'F:\\Users\\AmatVictoriaCuram\\Database\\' +
                     CSVfiles[i][:-7] + '\\' + CSVfiles[i][:-4])
    except OSError:
        continue
# Pass 2: reload each pickle and coerce every column to numeric
# (non-parsable values become NaN), then re-pickle in place.
for i in ranger:
    try:
        glaze = pd.read_pickle('F:\\Users\\AmatVictoriaCuram\\Database\\' +
                               (CSVfiles[i][:-4]))
        for x in glaze.columns:
            glaze[x] = pd.to_numeric(glaze[x], errors='coerce')
        pd.to_pickle(glaze, 'F:\\Users\\AmatVictoriaCuram\\Database\\' +
                     CSVfiles[i][:-4])
    except OSError:
        continue
#this is for testing individual CSVs
#tester = read_csv('F:\\Users\\AmatVictoriaCuram\\TemporaryCSV\\' +
# (df['CSVname'][0]), sep = ',')
#tester = tester.set_index('Date')
#pd.to_pickle(tester, 'F:\\Users\\AmatVictoriaCuram\\Database\\' + df['CSVname'][0][:-4]) | apache-2.0 |
chengsoonong/digbeta | dchen/music/src/tools/evaluate.py | 2 | 10972 | import numpy as np
# from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support, roc_auc_score
from joblib import Parallel, delayed
def calc_metrics(y_true, y_pred, tops=[]):
    """Compute R-Precision, Hit-Rate@N (for each N in ``tops``) and ROC-AUC
    for one example.

    NOTE: the mutable default ``tops=[]`` is never mutated here so it is
    safe, though a tuple default would be more conventional.
    """
    assert y_true.ndim == y_pred.ndim == 1
    assert len(y_true) == len(y_pred)
    npos = y_true.sum()
    # AUC is undefined for all-positive or all-negative ground truth.
    assert npos > 0
    assert npos < len(y_true)
    rp, hitrates = calc_RPrecision_HitRate(y_true, y_pred, tops=tops)
    auc = roc_auc_score(y_true, y_pred)
    return rp, hitrates, auc
def softmax(x):
    """Numerically stable softmax over a 1-D array (shift by the max before
    exponentiating so large inputs do not overflow)."""
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=0)
def diversity(vec):
    """
    diversity(L) = ( \\sum_{i \\in L} \\sum_{j \\in L \\setminus i} dist(i, j) ) / (|L| * (|L| - 1))

    Here dist(i, j) is 1 when vec[i] != vec[j] and 0 otherwise, so the score
    is the fraction of ordered pairs with distinct values.
    """
    assert vec.ndim == 1
    n = len(vec)
    pair_count = n * (n - 1)
    equal = np.equal.outer(vec, vec)  # pairwise equality matrix
    # ~equal counts the unequal ordered pairs; the diagonal is always equal
    # (zero distance) so it contributes nothing.
    return (~equal).sum() / pair_count
def pairwise_distance_hamming(X, normalise=True):
    """
    A vectorised approach to compute Hamming distance between all pairs of
    rows of matrix X.

    Note that ``p XOR q = (p AND NOT q) OR (NOT p AND q)``, so for binary
    p, q in {0, 1}^N:

        Hamming_distance(p, q)
            = (1 / N) sum_i (p_i XOR q_i)
            = (1 / N) (sum_i p_i + sum_i q_i - 2 p^T q)

    which is what the row-sum trick below computes for all pairs at once.

    Sanity check (against sklearn's pairwise_distances with metric='hamming'):

        d1 = pairwise_distances(aa, metric='hamming')
        sum_vec = aa.sum(axis=1, keepdims=True)
        d3 = (sum_vec + sum_vec.T - 2 * np.dot(aa, aa.T)) / D
        # (d1 - d3) is numerically zero

    Parameters
    ----------
    X : (M, D) integer 0/1 matrix (dense or sparse).
    normalise : bool
        Divide by D to get distances in [0, 1]; otherwise raw mismatch counts.
    """
    M, D = X.shape
    # Bug fix: ``np.int`` was removed in NumPy >= 1.24; accept any integer
    # dtype instead of comparing against the removed alias.
    assert np.issubdtype(X.dtype, np.integer)
    norm = D if normalise is True else 1
    # .sum()/.dot() (rather than np.sum/np.dot) also support sparse matrices.
    sum_vec = X.sum(axis=1).reshape(M, 1)
    dist = (sum_vec + sum_vec.T - 2 * X.dot(X.T)) / norm
    return dist
def calc_Precision_Recall(y_true, y_pred, K=[]):
    """
    Compute Precision (Hit-Rate) and Recall given top-K recommendation.

    Returns ``(pak, rak)``: dicts keyed by each K.

    NOTE(review): in this codebase "hit-rate" is true_pos / n_positives
    (see calc_RPrecision_HitRate), so ``pak`` here is true_pos / npos and
    ``rak`` is true_pos / K -- the opposite of the textbook
    precision/recall naming.  Kept as-is for caller compatibility; confirm
    before relying on the names.
    """
    tops = K
    assert y_true.ndim == y_pred.ndim == 1
    assert len(y_true) == len(y_pred)
    # Bug fix: ``np.bool`` was removed in NumPy 1.24; np.bool_ is the
    # equivalent scalar type for this dtype comparison.
    assert y_true.dtype == np.bool_
    assert type(tops) == list
    assert len(tops) > 0
    sortix = np.argsort(-y_pred)  # indices sorted by descending score
    npos = y_true.sum()
    assert npos > 0
    pak = dict()
    rak = dict()
    for top in tops:
        assert 0 < top <= len(y_true)
        true_pos = np.sum(y_true[sortix[:top]])
        pak[top] = true_pos / npos
        rak[top] = true_pos / top
    return (pak, rak)
def calc_RPrecision_HitRate(y_true, y_pred, tops=[]):
    """
    Compute R-Precision and Hit-Rate at top-N.

    R-Precision: fraction of true positives among the top-``npos`` scored
    items.  Hit-Rate@N: fraction of all positives recovered in the top N.
    Returns ``(rp, hitrates)``; ``hitrates`` is None when ``tops`` is empty.
    """
    assert y_true.ndim == y_pred.ndim == 1
    assert len(y_true) == len(y_pred)
    # Bug fix: ``np.bool`` was removed in NumPy 1.24; np.bool_ is the
    # equivalent scalar type for this dtype comparison.
    assert y_true.dtype == np.bool_
    assert type(tops) == list
    sortix = np.argsort(-y_pred)  # indices sorted by descending score
    npos = y_true.sum()
    assert npos > 0
    y_ = y_true[sortix]
    rp = np.mean(y_[:npos])
    if len(tops) == 0:
        return (rp, None)
    hitrates = dict()
    for top in tops:
        assert 0 < top <= len(y_true)
        hitrates[top] = np.sum(y_true[sortix[:top]]) / npos
    return (rp, hitrates)
def calc_F1(Y_true, Y_pred):
    """
    Compute F1 scores for multilabel prediction, one score for each example.

    precision = true_positive / n_true
    recall = true_positive / n_positive
    f1 = (2 * precision * recall) / (precision + recall)
       = 2 * true_positive / (n_true + n_positive)

    Examples with no true and no predicted labels get F1 = 0.
    """
    assert Y_true.shape == Y_pred.shape
    # Bug fix: ``np.bool`` was removed in NumPy 1.24; np.bool_ is the
    # equivalent scalar type for this dtype comparison.
    assert Y_true.dtype == Y_pred.dtype == np.bool_
    N, K = Y_true.shape
    n_true = np.sum(Y_true, axis=1)
    n_positive = np.sum(Y_pred, axis=1)
    true_positive = np.sum(np.logical_and(Y_true, Y_pred), axis=1)
    numerator = 2 * true_positive
    denominator = n_true + n_positive
    # Only divide where the denominator is non-zero; others stay 0.
    nonzero_ix = np.nonzero(denominator)[0]
    f1 = np.zeros(N)
    f1[nonzero_ix] = np.divide(numerator[nonzero_ix], denominator[nonzero_ix])
    return f1
def calc_RPrecision(Y_true, Y_pred, axis=0):
    """
    Compute RPrecision, one score for each (example if axis=0 else label)
    - thresholding predictions using the K-th largest predicted score,
      K is #positives in ground truth for an example
    - RPrecision: #true_positives / K
      where by the definition, K = #positives_in_ground_truth

    Returns ``(rps, nonzero_ix)``: scores for the rows/columns that have at
    least one positive, and their indices.
    """
    assert Y_true.shape == Y_pred.shape
    # Bug fix: ``np.bool`` was removed in NumPy 1.24; np.bool_ is the
    # equivalent scalar type for this dtype comparison.
    assert Y_true.dtype == np.bool_
    assert axis in [0, 1]
    N, K = Y_true.shape
    ax = 1 - axis
    num = (N, K)[axis]
    # Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin int is the
    # equivalent dtype here.
    numPos = np.sum(Y_true, axis=ax).astype(int)
    sort_ix = np.argsort(-Y_pred, axis=ax)
    if axis == 0:
        rows = np.arange(N)
        # index of thresholds (the K-th largest scores, NOTE index starts at 0)
        cols = sort_ix[rows, numPos-1]
        thresholds = Y_pred[rows, cols].reshape(N, 1)  # the K-th largest scores
        Y_pred_bin = Y_pred >= thresholds  # convert scores to binary predictions
    else:
        cols = np.arange(K)
        rows = sort_ix[numPos-1, cols]
        thresholds = Y_pred[rows, cols].reshape(1, K)
        Y_pred_bin = Y_pred >= thresholds
    # Only score rows/columns that actually contain positives.
    nonzero_ix = np.nonzero(numPos)[0]
    true_positives = np.logical_and(Y_true, Y_pred_bin)
    rps = np.zeros(num)
    rps[nonzero_ix] = true_positives.sum(axis=ax)[nonzero_ix] / numPos[nonzero_ix]
    return rps[nonzero_ix], nonzero_ix
def calc_rank(x, largestFirst=True):
    """
    Compute the rank of numbers in an array.

    Input
    - x: a 1D array of numbers
    - largestFirst: boolean
        if True, the largest number has rank 1, the second largest rank 2, ...
        if False, the smallest number has rank 1, the second smallest rank 2, ...
    """
    assert x.ndim == 1
    n = len(x)
    assert n > 0
    sortix = np.argsort(-x)
    # Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin int is the
    # equivalent dtype here.
    rank = np.zeros(n, dtype=int)
    # Vectorised scatter: position i in the sorted order gets rank i+1.
    rank[sortix] = np.arange(n) + 1
    if largestFirst is True:
        return rank
    else:
        # Reverse the ranking direction without re-sorting.
        return n + 1 - rank
def f1_score_nowarn(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None):
    """
    Compute F1 score, use the same interface as sklearn.metrics.f1_score,
    but disable the warning when both precision and recall are zeros.

    Passing ``warn_for=()`` suppresses sklearn's undefined-metric warning;
    the score itself is unchanged (0 in that case).
    """
    _, _, f, _ = precision_recall_fscore_support(y_true, y_pred, beta=1, labels=labels, pos_label=pos_label,
                                                 average=average, warn_for=(), sample_weight=sample_weight)
    return f
def evalPred(truth, pred, metricType='Precision@K'):
    """
    Compute loss given ground truth and prediction

    Input:
        - truth: binary array of true labels
        - pred: real-valued array of predictions
        - metricType: a tuple ('Precision', k) for Precision@k, or one of
          'Subset01', 'Hamming', 'Ranking', 'TopPush', 'BottomPush',
          'RPrecision'.

    NOTE(review): the default string 'Precision@K' is not itself an accepted
    value -- it falls through to the final assert.  Callers always pass a
    valid metric.
    """
    truth = np.asarray(truth)
    pred = np.asarray(pred)
    assert(truth.shape[0] == pred.shape[0])
    L = truth.shape[0]
    nPos = np.sum(truth)

    assert float(nPos).is_integer()
    nPos = int(nPos)

    # Binary predictions by thresholding scores at 0.
    # Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin int is the
    # equivalent dtype here.
    predBin = np.array((pred > 0), dtype=int)

    if type(metricType) == tuple:
        # Precision@k, k is constant
        k = metricType[1]
        assert k > 0
        assert k <= L
        assert nPos > 0

        # sorted indices of the labels most likely to be +'ve
        idx = np.argsort(pred)[::-1]

        # true labels according to the sorted order
        y = truth[idx]

        # fraction of +'ves in the top K predictions
        return np.mean(y[:k])
    elif metricType == 'Subset01':
        # 0 iff the thresholded prediction matches the truth exactly
        return 1 - int(np.all(truth == predBin))
    elif metricType == 'Hamming':
        return np.sum(truth != predBin) / L
    elif metricType == 'Ranking':
        # Count mis-ordered (positive, negative) pairs; ties count 0.5.
        loss = 0
        for i in range(L):
            for j in range(L):
                if truth[i] > truth[j]:
                    if pred[i] < pred[j]:
                        loss += 1
                    elif pred[i] == pred[j]:
                        loss += 0.5
        return loss
    elif metricType == 'TopPush':
        posInd = np.nonzero(truth)[0].tolist()
        negInd = sorted(set(np.arange(L)) - set(posInd))
        # fraction of positives scored no higher than the best negative
        return np.mean(pred[posInd] <= np.max(pred[negInd]))
    elif metricType == 'BottomPush':
        posInd = np.nonzero(truth)[0].tolist()
        negInd = sorted(set(np.arange(L)) - set(posInd))
        # fraction of negatives scored no lower than the worst positive
        return np.mean(pred[negInd] >= np.min(pred[posInd]))
    elif metricType == 'RPrecision':
        assert nPos > 0
        # sorted indices of the labels most likely to be +'ve
        idx = np.argsort(pred)[::-1]

        # true labels according to the sorted order
        y = truth[idx]

        # fraction of +'ves in the top nPos predictions
        return np.mean(y[:nPos])
    else:
        assert(False)
def calcLoss(allTruths, allPreds, metricType, njobs=-1):
    """Evaluate ``metricType`` (see evalPred) for every example row, in
    parallel with joblib, and return the losses as a 1D array."""
    N = allTruths.shape[0]
    losses = Parallel(n_jobs=njobs)(delayed(evalPred)(allTruths[i, :], allPreds[i, :], metricType)
                                    for i in range(N))
    return np.asarray(losses)
def avgPrecision(allTruths, allPreds, k):
    """Mean Precision@k over all example rows (see evalPred)."""
    assert k <= allTruths.shape[1]
    metricType = ('Precision', k)
    losses = [evalPred(allTruths[row, :], allPreds[row, :], metricType)
              for row in range(allPreds.shape[0])]
    return np.mean(losses)
| gpl-3.0 |
alexxyjiang/docker-images | jupyterlab/scala2.11/jupyter_notebook_config.py | 2 | 23448 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
# NOTE(review): '*' listens on all interfaces and exposes the server to the
# whole network; presumably intended for containerised use behind a proxy or
# firewall -- confirm before deploying elsewhere.
c.NotebookApp.ip = '*'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions.Entry values can
# be used to enable and disable the loading ofthe extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
# Serve the notebook tree from the shared data directory.
c.NotebookApp.notebook_dir = '/home/devuser/data'
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
#  To generate, type in a python/IPython shell:
#
#    from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#
# SECURITY FIX: a comment here previously recorded the plaintext password
# alongside its hash, defeating the purpose of hashing it.  The plaintext
# has been removed; rotate this credential, since it was committed in clear
# text (and a SHA1-salted hash is cheap to brute-force anyway).
c.NotebookApp.password = 'sha1:a0dc1449069e:5920839c9d74d9325900cb3f514d906689b0c5a0'
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
c.NotebookApp.password_required = True
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# LabApp(NotebookApp) configuration
#------------------------------------------------------------------------------
## The app directory to launch JupyterLab from.
# Points JupyterLab at the same shared data directory used for notebook_dir.
c.LabApp.app_dir = '/home/devuser/data'
## Whether to start the app in core mode. In this mode, JupyterLab will run using
# the JavaScript assets that are within the installed JupyterLab Python package.
# In core mode, third party extensions are disabled. The `--dev-mode` flag is an
# alias to this to be used when the Python package itself is installed in
# development mode (`pip install -e .`).
#c.LabApp.core_mode = False
## The default URL to redirect to from `/`
#c.LabApp.default_url = '/lab'
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk on a temporary file and then if
# succefully written, it replaces the old ones. This procedure, namely
# 'atomic_writing', causes some bugs on file system whitout operation order
# enforcement (like some networked fs). If set to False, the new notebook is
# written directly on the old one which could fail (eg: full filesystem or quota
# )
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| mit |
Achuth17/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to as) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
                  n_features=2,
                  centers=4,
                  cluster_std=1,
                  center_box=(-10.0, 10.0),
                  shuffle=True,
                  random_state=1)  # For reproducibility
# Candidate cluster counts to evaluate with silhouette analysis.
range_n_clusters = [2, 3, 4, 5, 6]
# For each candidate cluster count: fit KMeans, print the mean silhouette
# score, and draw a per-cluster silhouette plot next to the labelled scatter.
for n_clusters in range_n_clusters:
    # Create a subplot with 1 row and 2 columns
    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_size_inches(18, 7)
    # The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1, 1 but in this example all
    # lie within [-0.1, 1]
    ax1.set_xlim([-0.1, 1])
    # The (n_clusters+1)*10 is for inserting blank space between silhouette
    # plots of individual clusters, to demarcate them clearly.
    ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
    # Initialize the clusterer with n_clusters value and a random generator
    # seed of 10 for reproducibility.
    clusterer = KMeans(n_clusters=n_clusters, random_state=10)
    cluster_labels = clusterer.fit_predict(X)
    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(X, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)
    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, cluster_labels)
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        # NOTE(review): cm.spectral was removed in matplotlib >= 2.2
        # (use cm.nipy_spectral there) -- confirm the target matplotlib version.
        color = cm.spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)
        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples
    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")
    # The vertical line for average silhoutte score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
    # 2nd Plot showing the actual clusters formed
    # (same cm.spectral caveat as above for modern matplotlib)
    colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
    ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                c=colors)
    # Labeling the clusters
    centers = clusterer.cluster_centers_
    # Draw white circles at cluster centers
    ax2.scatter(centers[:, 0], centers[:, 1],
                marker='o', c="white", alpha=1, s=200)
    for i, c in enumerate(centers):
        ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
    ax2.set_title("The visualization of the clustered data.")
    ax2.set_xlabel("Feature space for the 1st feature")
    ax2.set_ylabel("Feature space for the 2nd feature")
    plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
                  "with n_clusters = %d" % n_clusters),
                 fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
buckiracer/data-science-from-scratch | RefMaterials/code-python3/introduction.py | 6 | 8194 | # at this stage in the book we haven't actually installed matplotlib,
# comment this out if you need to
from matplotlib import pyplot as plt
##########################
# #
# FINDING KEY CONNECTORS #
# #
##########################
# Toy social network: each user is a dict with an "id" and a "name"; a
# mutual "friends" list of user dicts is wired up below.
users = [
    { "id": 0, "name": "Hero" },
    { "id": 1, "name": "Dunn" },
    { "id": 2, "name": "Sue" },
    { "id": 3, "name": "Chi" },
    { "id": 4, "name": "Thor" },
    { "id": 5, "name": "Clive" },
    { "id": 6, "name": "Hicks" },
    { "id": 7, "name": "Devin" },
    { "id": 8, "name": "Kate" },
    { "id": 9, "name": "Klein" },
    { "id": 10, "name": "Jen" }
]
# Undirected friendship edges, given as (user_id, user_id) pairs.
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
               (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# first give each user an empty list
for user in users:
    user["friends"] = []
# and then populate the lists with friendships
for i, j in friendships:
    # this works because users[i] is the user whose id is i
    users[i]["friends"].append(users[j]) # add i as a friend of j
    users[j]["friends"].append(users[i]) # add j as a friend of i
def number_of_friends(user):
    """Return the number of entries in *user*'s "friends" list."""
    friend_list = user["friends"]
    return len(friend_list)
# Aggregate connectivity statistics over the toy network above.
total_connections = sum(number_of_friends(user)
                        for user in users) # 24
num_users = len(users)
avg_connections = total_connections / num_users # 2.4
################################
# #
# DATA SCIENTISTS YOU MAY KNOW #
# #
################################
def friends_of_friend_ids_bad(user):
    """Naively list the ids of friends-of-friends.

    Keeps duplicates and the user's own id -- hence "bad".
    """
    # "foaf" is short for "friend of a friend"
    foaf_ids = []
    for friend in user["friends"]:
        for foaf in friend["friends"]:
            foaf_ids.append(foaf["id"])
    return foaf_ids
from collections import Counter # not loaded by default
def not_the_same(user, other_user):
    """Return True when the two user dicts carry different "id" values."""
    same = user["id"] == other_user["id"]
    return not same
def not_friends(user, other_user):
    """Return True when *other_user* appears nowhere in user["friends"].

    Membership is decided by id (via not_the_same), not object identity.
    """
    for friend in user["friends"]:
        if not not_the_same(friend, other_user):
            return False
    return True
def friends_of_friend_ids(user):
    """Count, per candidate id, how many of *user*'s friends know them.

    The user themself and anyone already a direct friend are excluded.
    Returns a Counter mapping candidate id -> number of mutual friends.
    """
    mutual_counts = Counter()
    for friend in user["friends"]:          # for each of my friends
        for foaf in friend["friends"]:      # look at *their* friends
            if not_the_same(user, foaf) and not_friends(user, foaf):
                mutual_counts[foaf["id"]] += 1
    return mutual_counts
print(friends_of_friend_ids(users[3])) # Counter({0: 2, 5: 1})
# (user_id, interest) pairs used by the "topics of interest" sections below.
interests = [
    (0, "Hadoop"), (0, "Big Data"), (0, "HBase"), (0, "Java"),
    (0, "Spark"), (0, "Storm"), (0, "Cassandra"),
    (1, "NoSQL"), (1, "MongoDB"), (1, "Cassandra"), (1, "HBase"),
    (1, "Postgres"), (2, "Python"), (2, "scikit-learn"), (2, "scipy"),
    (2, "numpy"), (2, "statsmodels"), (2, "pandas"), (3, "R"), (3, "Python"),
    (3, "statistics"), (3, "regression"), (3, "probability"),
    (4, "machine learning"), (4, "regression"), (4, "decision trees"),
    (4, "libsvm"), (5, "Python"), (5, "R"), (5, "Java"), (5, "C++"),
    (5, "Haskell"), (5, "programming languages"), (6, "statistics"),
    (6, "probability"), (6, "mathematics"), (6, "theory"),
    (7, "machine learning"), (7, "scikit-learn"), (7, "Mahout"),
    (7, "neural networks"), (8, "neural networks"), (8, "deep learning"),
    (8, "Big Data"), (8, "artificial intelligence"), (9, "Hadoop"),
    (9, "Java"), (9, "MapReduce"), (9, "Big Data")
]
def data_scientists_who_like(target_interest):
    """Return the ids of every user whose entry in the module-level
    `interests` pairs matches *target_interest* exactly."""
    matching_ids = []
    for user_id, user_interest in interests:
        if user_interest == target_interest:
            matching_ids.append(user_id)
    return matching_ids
from collections import defaultdict
# Inverted indexes over the (user_id, interest) pairs, for fast lookups.
# keys are interests, values are lists of user_ids with that interest
user_ids_by_interest = defaultdict(list)
for user_id, interest in interests:
    user_ids_by_interest[interest].append(user_id)
# keys are user_ids, values are lists of interests for that user_id
interests_by_user_id = defaultdict(list)
for user_id, interest in interests:
    interests_by_user_id[user_id].append(interest)
def most_common_interests_with(user_id):
    """Count shared interests between *user_id* and every other user.

    Returns a Counter mapping other user ids to the number of interests
    they have in common with *user_id* (the user themself is excluded).
    """
    # BUG FIX: the original referenced the undefined names
    # interests_by_user["user_id"] and users_by_interest, which raised
    # NameError when called; the module-level indexes are actually named
    # interests_by_user_id and user_ids_by_interest.
    return Counter(interested_user_id
                   for interest in interests_by_user_id[user_id]
                   for interested_user_id in user_ids_by_interest[interest]
                   if interested_user_id != user_id)
###########################
# #
# SALARIES AND EXPERIENCE #
# #
###########################
# (salary, tenure-in-years) pairs for the "salaries and experience" section.
salaries_and_tenures = [(83000, 8.7), (88000, 8.1),
                        (48000, 0.7), (76000, 6),
                        (69000, 6.5), (76000, 7.5),
                        (60000, 2.5), (83000, 10),
                        (48000, 1.9), (63000, 4.2)]
def make_chart_salaries_by_tenure():
    """Scatter-plot salary against tenure; shows the figure, returns nothing."""
    tenures = [tenure for salary, tenure in salaries_and_tenures]
    salaries = [salary for salary, tenure in salaries_and_tenures]
    plt.scatter(tenures, salaries)
    plt.xlabel("Years Experience")
    plt.ylabel("Salary")
    plt.show()
# Group salaries by exact tenure, then average each group.  With this data
# every tenure is unique, so each "average" is over a single salary.
# keys are years
# values are the salaries for each tenure
salary_by_tenure = defaultdict(list)
for salary, tenure in salaries_and_tenures:
    salary_by_tenure[tenure].append(salary)
average_salary_by_tenure = {
    tenure : sum(salaries) / len(salaries)
    for tenure, salaries in salary_by_tenure.items()
}
def tenure_bucket(tenure):
    """Map a tenure in years onto one of three coarse labels."""
    if tenure >= 5:
        return "more than five"
    if tenure >= 2:
        return "between two and five"
    return "less than two"
# Group salaries by coarse tenure bucket, then average each bucket.
salary_by_tenure_bucket = defaultdict(list)
for salary, tenure in salaries_and_tenures:
    bucket = tenure_bucket(tenure)
    salary_by_tenure_bucket[bucket].append(salary)
# NOTE: the comprehension variable below shadows the tenure_bucket
# function inside this expression (harmless, but easy to misread).
average_salary_by_bucket = {
    tenure_bucket : sum(salaries) / len(salaries)
    for tenure_bucket, salaries in salary_by_tenure_bucket.items()
}
#################
# #
# PAID_ACCOUNTS #
# #
#################
def predict_paid_or_unpaid(years_experience):
    """Hand-tuned rule: short and long tenures predict "paid",
    the middle band [3.0, 8.5) predicts "unpaid"."""
    if 3.0 <= years_experience < 8.5:
        return "unpaid"
    return "paid"
######################
# #
# TOPICS OF INTEREST #
# #
######################
# word -> frequency across all interest strings (lowercased, whitespace-split)
words_and_counts = Counter(word
                           for user, interest in interests
                           for word in interest.lower().split())
# Demo driver: prints the chapter's computed results when run as a script.
if __name__ == "__main__":
    print()
    print("######################")
    print("#")
    print("# FINDING KEY CONNECTORS")
    print("#")
    print("######################")
    print()
    print("total connections", total_connections)
    print("number of users", num_users)
    print("average connections", total_connections / num_users)
    print()
    # create a list (user_id, number_of_friends)
    num_friends_by_id = [(user["id"], number_of_friends(user))
                         for user in users]
    print("users sorted by number of friends:")
    print(sorted(num_friends_by_id,
                 key=lambda pair: pair[1], # by number of friends
                 reverse=True)) # largest to smallest
    print()
    print("######################")
    print("#")
    print("# DATA SCIENTISTS YOU MAY KNOW")
    print("#")
    print("######################")
    print()
    print("friends of friends bad for user 0:", friends_of_friend_ids_bad(users[0]))
    print("friends of friends for user 3:", friends_of_friend_ids(users[3]))
    print()
    print("######################")
    print("#")
    print("# SALARIES AND TENURES")
    print("#")
    print("######################")
    print()
    print("average salary by tenure", average_salary_by_tenure)
    print("average salary by tenure bucket", average_salary_by_bucket)
    print()
    print("######################")
    print("#")
    print("# MOST COMMON WORDS")
    print("#")
    print("######################")
    print()
    # only report words that occur more than once
    for word, count in words_and_counts.most_common():
        if count > 1:
            print(word, count)
| unlicense |
knutfrode/opendrift | opendrift/__init__.py | 1 | 7342 | import logging
import unittest
import importlib
import numpy as np
import time
from datetime import timedelta
from .version import __version__
# For automated access to available drift classes, e.g. for GUI
# Hardcoded for now
_available_models = \
['leeway.Leeway',
'openoil.OpenOil',
'shipdrift.ShipDrift']
def get_model_names():
    """Return the bare class names (e.g. 'Leeway') of all registered models."""
    names = []
    for entry in _available_models:
        names.append(entry.rpartition('.')[2])
    return names
def get_model(model_name):
    """Return the drift-model class registered under *model_name*.

    Raises ValueError if the name is not in the hardcoded registry.
    """
    if model_name not in get_model_names():
        raise ValueError('No drift model named %s' % model_name)
    for entry in _available_models:
        module_part, _, class_part = entry.partition('.')
        if class_part != model_name:
            continue
        module = importlib.import_module(
            'opendrift.models.' + module_part)
        return getattr(module, class_part)
def open(filename, times=None):
    '''Import netCDF output file as OpenDrift object of correct class.

    If *filename* does not exist locally it is treated as a URL and
    downloaded to 'opendrift_tmp.nc' first.  The class to instantiate is
    read from the file's global attributes opendrift_module and
    opendrift_class; if that class cannot be located, OceanDrift3D is
    used as a generic fallback.

    NOTE: this function shadows the builtin ``open`` within this module.
    '''
    import os
    import pydoc
    from netCDF4 import Dataset
    if not os.path.exists(filename):
        logging.info('File does not exist, trying to retrieve from URL')
        # urlretrieve moved to urllib.request in Python 3; the previous
        # code used the Python-2-only urllib.urlretrieve.
        try:
            from urllib.request import urlretrieve
        except ImportError:
            from urllib import urlretrieve
        try:
            urlretrieve(filename, 'opendrift_tmp.nc')
            filename = 'opendrift_tmp.nc'
        except Exception:
            raise ValueError('%s does not exist' % filename)
    n = Dataset(filename)
    try:
        module_name = n.opendrift_module
        class_name = n.opendrift_class
    except AttributeError:
        raise ValueError(filename + ' does not contain '
                         'necessary global attributes '
                         'opendrift_module and opendrift_class')
    finally:
        # Previously the dataset was left open when the attributes were
        # missing; always release the file handle.
        n.close()
    cls = pydoc.locate(module_name + '.' + class_name)
    if cls is None:
        # Fallback: generic 3D ocean drift model
        from models import oceandrift3D
        cls = oceandrift3D.OceanDrift3D
    o = cls()
    o.io_import_file(filename, times)
    logging.info('Returning ' + str(type(o)) + ' object')
    return o
def versions():
    """Return a formatted summary of software and hardware versions.

    Intended for diagnostics / bug reports: lists OpenDrift, NumPy,
    SciPy, Matplotlib, NetCDF4 and Python versions plus CPU count,
    processor type and (if psutil is available) total RAM.
    """
    import multiprocessing
    import platform
    import scipy
    import matplotlib
    import netCDF4
    import sys
    s = '\n------------------------------------------------------\n'
    s += 'Software and hardware:\n'
    s += ' OpenDrift version %s\n' % __version__
    try:
        from psutil import virtual_memory
        ram = virtual_memory().total/(1024**3)
    except Exception:  # psutil is optional; was a bare except before
        ram = 'unknown'
    s += ' %s GB memory\n' % ram
    s += ' %s processors (%s)\n' % (multiprocessing.cpu_count(),
                                    platform.processor())
    s += ' NumPy version %s\n' % np.__version__
    s += ' SciPy version %s\n' % scipy.__version__
    s += ' Matplotlib version %s\n' % matplotlib.__version__
    s += ' NetCDF4 version %s\n' % netCDF4.__version__
    s += ' Python version %s\n' % sys.version.replace('\n', '')
    s += '------------------------------------------------------\n'
    return s
def import_from_ladim(ladimfile, romsfile):
    """Import a LADiM output file as an OpenDrift simulation object.

    Parameters
    ----------
    ladimfile : str
        Path to a LADiM particle-tracking netCDF output file.
    romsfile : str or reader object
        ROMS file (or ready-made reader) used to convert LADiM grid
        coordinates (X, Y) to lon/lat via ``xy2lonlat``.

    Returns
    -------
    OceanDrift3D object with its history array filled from the LADiM data.
    """
    from models.oceandrift3D import OceanDrift3D
    o = OceanDrift3D()
    from netCDF4 import Dataset, date2num, num2date
    # NOTE(review): basestring is Python 2 only - this branch breaks
    # under Python 3; confirm intended interpreter version.
    if isinstance(romsfile, basestring):
        from opendrift.readers import reader_ROMS_native
        romsfile = reader_ROMS_native.Reader(romsfile)
    l = Dataset(ladimfile, 'r')
    # LADiM stores a ragged array: particle ids plus a per-timestep
    # count; start/end indices delimit each timestep's slice.
    pid = l.variables['pid'][:]
    particle_count = l.variables['particle_count'][:]
    end_index = np.cumsum(particle_count)
    start_index = np.concatenate(([0], end_index[:-1]))
    x = l.variables['X'][:]
    y = l.variables['Y'][:]
    lon, lat = romsfile.xy2lonlat(x, y)
    time = num2date(l.variables['time'][:],
                    l.variables['time'].units)
    history_dtype_fields = [
        (name, o.ElementType.variables[name]['dtype'])
        for name in o.ElementType.variables]
    # Add environment variables
    o.history_metadata = o.ElementType.variables.copy()
    history_dtype = np.dtype(history_dtype_fields)
    num_timesteps = len(time)
    num_elements = len(l.dimensions['particle'])
    # Masked (element, timestep) history array; fully masked until filled.
    o.history = np.ma.array(
        np.zeros([num_elements, num_timesteps]),
        dtype=history_dtype, mask=[True])
    for n in range(num_timesteps):
        start = start_index[n]
        # ids of the elements active at this output step
        active = pid[start:start+particle_count[n]]
        o.history['lon'][active, n] = \
            lon[start:start+particle_count[n]]
        o.history['lat'][active, n] = \
            lat[start:start+particle_count[n]]
        o.history['status'][active, n] = 0
    o.status_categories = ['active', 'missing_data']
    # Flag each element's last valid sample as status 1 ('missing_data').
    firstlast = np.ma.notmasked_edges(o.history['status'], axis=1)
    index_of_last = firstlast[1][1]
    o.history['status'][np.arange(len(index_of_last)),
                        index_of_last] = 1
    # Final element state = values at each element's last valid step.
    kwargs = {}
    for var in ['lon', 'lat', 'status']:
        kwargs[var] = o.history[var][
            np.arange(len(index_of_last)), index_of_last]
    kwargs['ID'] = range(num_elements)
    o.elements = o.ElementType(**kwargs)
    o.elements_deactivated = o.ElementType()
    o.remove_deactivated_elements()
    # Import time steps from metadata
    o.time_step = time[1] - time[0]
    o.time_step_output = o.time_step
    o.start_time = time[0]
    o.time = time[-1]
    o.steps_output = num_timesteps
    return o
def sensitivity_simulation(cls, lon=4.7, lat=60.0, z=0, readers=None,
                           number=1000, radius=0, seed_time=None,
                           time_step=3600, time_step_output=None,
                           duration=timedelta(hours=2),
                           filenames=None, recalculate=True):
    """Run two simulations with drift model class *cls* and return both.

    lon, lat, z and radius may each be a scalar (shared by both runs) or
    a pair with one value per run.  Run i writes its output to 'o<i>.nc';
    with ``recalculate=False`` those files are re-imported instead of
    re-running the simulations.

    Returns
    -------
    (o1, o2) : simulation objects of run 0 and run 1, respectively.
    """
    if recalculate is False:
        try:
            o1 = cls()
            o1.io_import_file('o0.nc')
            o2 = cls()
            o2.io_import_file('o1.nc')
            return o1, o2
        except Exception:  # fall through and recalculate
            print('Could not import')
    # Broadcast scalar seeding parameters to one value per simulation.
    lon = np.atleast_1d(lon)
    if len(lon) == 1:
        lon = [lon[0], lon[0]]
    lat = np.atleast_1d(lat)
    if len(lat) == 1:
        lat = [lat[0], lat[0]]
    z = np.atleast_1d(z)
    if len(z) == 1:
        z = [z[0], z[0]]
    # Bugfix: radius is now broadcast and indexed per run, consistent
    # with lon/lat/z (previously the whole array was passed to both runs).
    radius = np.atleast_1d(radius)
    if len(radius) == 1:
        radius = [radius[0], radius[0]]
    for i in (0, 1):
        print(i)
        o = cls()
        o.add_readers_from_list(readers)
        if seed_time is None:
            # Default seeding time: start time of the first reader.
            for r in o.readers:
                seed_time = o.readers[r].start_time
                break
        o.seed_elements(lon=lon[i], lat=lat[i], z=z[i], number=number,
                        radius=radius[i], time=seed_time)
        if time_step_output is None:
            time_step_output = time_step
        o.run(duration=duration, time_step=time_step,
              time_step_output=time_step_output, outfile='o%d.nc' % i)
        # Bugfix: run 0 (written to o0.nc) must be returned as o1 to
        # match the recalculate=False branch above, which loads o1 from
        # o0.nc; previously the two return values were swapped.
        if i == 0:
            o1 = o
        else:
            o2 = o
    return o1, o2
# Wall-clock timing hooks, attached to every unittest-based test below.
def setUp(self):
    """Remember when this test started and log its id."""
    self._started_at = time.time()
    logging.info('STARTING TEST: {}'.format(self.id()))


def tearDown(self):
    """Log the elapsed wall-clock time of this test."""
    elapsed_s = round(time.time() - self._started_at, 2)
    logging.info('TIMING: ({}s) {}'.format(elapsed_s, self.id()))


unittest.TestCase.setUp = setUp
unittest.TestCase.tearDown = tearDown
| gpl-2.0 |
JPFrancoia/scikit-learn | examples/tree/plot_tree_regression.py | 95 | 1516 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision trees <tree>` is
used to fit a sine curve with addition noisy observation. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt

# Create a random dataset: a noisy sine curve sampled on [0, 5)
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
# Perturb every 5th target value (16 of the 80 samples)
y[::5] += 3 * (0.5 - rng.rand(16))

# Fit regression model: a shallow tree vs a deep (overfitting) tree
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)

# Predict on a fine grid to visualise the piecewise-constant fits
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)

# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
zorroblue/scikit-learn | benchmarks/bench_sparsify.py | 50 | 3410 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
    """Return the fraction of nonzero entries in *X*.

    Bugfix: the previous version divided by the module-level
    ``n_samples * n_features``, which misreported the sparsity of any
    array with a different shape (e.g. X_test, which has half the rows,
    or the fitted coefficient vector ``clf.coef_``).
    """
    return np.count_nonzero(X) / float(X.size)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal((n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, max_iter=2000,
tol=None)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
    """Run 300 predictions with the dense coefficient representation."""
    for _repeat in range(300):
        clf.predict(X_test)
def benchmark_sparse_predict():
    """Run 300 predictions after converting the test set to CSR format."""
    sparse_test_set = csr_matrix(X_test)
    for _repeat in range(300):
        clf.predict(sparse_test_set)
def score(y_test, y_pred, case):
    """Print the r^2 of *y_pred* against *y_test*, labelled with *case*."""
    print("r^2 on test data (%s) : %f" % (case, r2_score(y_test, y_pred)))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
xubenben/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
#          Alexandre Gramfort, 2011
# License: BSD 3 clause

print(__doc__)

import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering

###############################################################################
# Generate data
# NOTE(review): scipy.misc.lena() was removed in modern SciPy releases -
# confirm the pinned SciPy version before running this example.
lena = sp.misc.lena()
# Downsample the image by a factor of 4 (sum of each 2x2 pixel block)
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# One gray-level feature per pixel
X = np.reshape(lena, (-1, 1))

###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)

###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15  # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
        linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)

###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
# Draw one contour line around each cluster, colored by cluster index
for l in range(n_clusters):
    plt.contour(label == l, contours=1,
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
benoitsteiner/tensorflow-opencl | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 137 | 5063 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
  # pylint: disable=undefined-variable
  """tf.learn IO operation tests.

  Each test is skipped (with a console note) when the optional pandas
  and/or dask dependencies are not installed (HAS_PANDAS / HAS_DASK
  flags come from the wildcard learn_io import above).
  """

  def test_pandas_dataframe(self):
    # End-to-end fit/score of a LinearClassifier with features and
    # labels both provided as pandas DataFrames.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      labels = pd.DataFrame(iris.target)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
    else:
      print("No pandas installed. pandas-related tests are skipped.")

  def test_pandas_series(self):
    # Same as above, but labels provided as a pandas Series.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      labels = pd.Series(iris.target)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      score = accuracy_score(labels, list(classifier.predict_classes(data)))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))

  def test_string_data_formats(self):
    # Non-numeric (string) DataFrames must be rejected with ValueError.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      with self.assertRaises(ValueError):
        learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
      with self.assertRaises(ValueError):
        learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))

  def test_dask_io(self):
    # extract_dask_data / extract_dask_labels must preserve partition
    # divisions and columns, and reject multi-column labels.
    if HAS_DASK and HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
      # test dask.dataframe
      df = pd.DataFrame(
          dict(
              a=list("aabbcc"), b=list(range(6))),
          index=pd.date_range(
              start="20100101", periods=6))
      ddf = dd.from_pandas(df, npartitions=3)
      extracted_ddf = extract_dask_data(ddf)
      self.assertEqual(
          extracted_ddf.divisions, (0, 2, 4, 6),
          "Failed with divisions = {0}".format(extracted_ddf.divisions))
      self.assertEqual(
          extracted_ddf.columns.tolist(), ["a", "b"],
          "Failed with columns = {0}".format(extracted_ddf.columns))
      # test dask.series
      labels = ddf["a"]
      extracted_labels = extract_dask_labels(labels)
      self.assertEqual(
          extracted_labels.divisions, (0, 2, 4, 6),
          "Failed with divisions = {0}".format(extracted_labels.divisions))
      # labels should only have one column
      with self.assertRaises(ValueError):
        extract_dask_labels(ddf)
    else:
      print("No dask installed. dask-related tests are skipped.")

  def test_dask_iris_classification(self):
    # End-to-end fit/predict with dask-partitioned iris data.
    if HAS_DASK and HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      data = dd.from_pandas(data, npartitions=2)
      labels = pd.DataFrame(iris.target)
      labels = dd.from_pandas(labels, npartitions=2)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      predictions = data.map_partitions(classifier.predict).compute()
      score = accuracy_score(labels.compute(), predictions)
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))


if __name__ == "__main__":
  test.main()
| apache-2.0 |
MartinThoma/algorithms | Python/string-concat/main.py | 1 | 2095 | import operator
import random
import string
import timeit
from io import StringIO
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def main():
    """Benchmark the three string-concatenation strategies and plot them."""
    str_list = [create_random_string(255) for _ in range(1000)]
    candidates = [
        (plus_concat, "plus_concat"),
        (join_concat, "join_concat"),
        (cstring_concat, "cstring_concat"),
    ]
    duration_list = {}
    for func, name in candidates:
        durations = timeit.repeat(lambda: func(str_list), repeat=5000, number=3)
        # store milliseconds for the boxplot
        duration_list[name] = list(np.array(durations) * 1000)
        summary = (
            "{func:<20}: "
            "min: {min:0.3f}s, mean: {mean:0.3f}s, max: {max:0.3f}s".format(
                func=name,
                min=min(durations),
                mean=np.mean(durations),
                max=max(durations),
            )
        )
        print(summary)
    create_boxplot(duration_list)
create_boxplot(duration_list)
def create_boxplot(duration_list, showfliers=False):
    """Render the timing distributions as a horizontal boxplot (output.png)."""
    plt.figure(num=None, figsize=(8, 4), dpi=300, facecolor="w", edgecolor="k")
    sns.set(style="whitegrid")
    # order the rows by their timing lists, fastest first
    ordered = sorted(duration_list.items(), key=lambda item: item[1])
    sorted_keys = tuple(name for name, _ in ordered)
    sorted_vals = tuple(values for _, values in ordered)
    ax = sns.boxplot(
        data=sorted_vals,
        width=0.3,
        orient="h",
        flierprops=dict(markerfacecolor="0.75", markersize=1, linestyle="none"),
        showfliers=showfliers,
    )
    ax.set(xlabel="Time in ms", ylabel="")
    plt.yticks(plt.yticks()[0], sorted_keys)
    plt.tight_layout()
    plt.savefig("output.png")
def create_random_string(N):
    """Return a random string of length *N* over uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    return "".join(random.choice(alphabet) for _ in range(N))
def plus_concat(str_list):
    """Concatenate by repeated string addition (quadratic worst case)."""
    result = ""
    for piece in str_list:
        result = result + piece
    return result
def join_concat(str_list):
    """Concatenate by collecting the pieces in a list and joining once."""
    pieces = list(str_list)
    return "".join(pieces)
def cstring_concat(str_list):
    """Concatenate by streaming the pieces into an in-memory text buffer."""
    buffer = StringIO()
    buffer.writelines(str_list)
    return buffer.getvalue()
if __name__ == "__main__":
main()
| mit |
roxyboy/scikit-learn | sklearn/utils/tests/test_extmath.py | 130 | 16270 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
    # density() must agree between a dense array and every sparse format
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 5))
    X[1, 2] = 0
    X[5, 3] = 0
    for sparse_format in (sparse.csr_matrix, sparse.csc_matrix,
                          sparse.coo_matrix, sparse.lil_matrix):
        assert_equal(density(sparse_format(X)), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
    # norm/squared_norm must match numpy, also for large shifted values
    X = np.random.RandomState(42).randn(50, 63)
    X = X * 100 + 200  # large offset: check numerical stability
    assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
    assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
    assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X wity structure approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limit impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
    # cartesian() must enumerate the product in row-major order
    axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
    expected = np.array([[a, b, c]
                         for a in (1, 2, 3)
                         for b in (4, 5)
                         for c in (6, 7)])
    assert_array_equal(expected, cartesian(axes))
    # a single axis is returned as a column vector
    x = np.arange(3)
    assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
    # Check correctness and robustness of logistic sigmoid implementation
    def naive_log_logistic(x):
        return np.log(1 / (1 + np.exp(-x)))

    x = np.linspace(-2, 2, 50)
    assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
    # extreme inputs must not overflow/underflow
    extreme_x = np.array([-100., 100.])
    assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
    """Check the Youngs & Cramer incremental mean/variance update against a
    direct batch computation over the full data."""
    # Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
    A = np.array([[600, 470, 170, 430, 300],
                  [600, 470, 170, 430, 300],
                  [600, 470, 170, 430, 300],
                  [600, 470, 170, 430, 300]]).T
    # Split the samples into two batches; seed the incremental update with
    # the statistics of the first batch, then feed it the second.
    idx = 2
    X1 = A[:idx, :]
    X2 = A[idx:, :]
    old_means = X1.mean(axis=0)
    old_variances = X1.var(axis=0)
    old_sample_count = X1.shape[0]
    final_means, final_variances, final_count = _batch_mean_variance_update(
        X2, old_means, old_variances, old_sample_count)
    # The incremental result must match batch statistics over all of A.
    assert_almost_equal(final_means, A.mean(axis=0), 6)
    assert_almost_equal(final_variances, A.var(axis=0), 6)
    assert_almost_equal(final_count, A.shape[0])


def test_incremental_variance_ddof():
    """Stream batches of several sizes through the incremental update and
    verify the running mean/variance/count after every batch."""
    # Test that degrees of freedom parameter for calculations are correct.
    rng = np.random.RandomState(1999)
    X = rng.randn(50, 10)
    n_samples, n_features = X.shape
    for batch_size in [11, 20, 37]:
        steps = np.arange(0, X.shape[0], batch_size)
        if steps[-1] != X.shape[0]:
            # Make the final (possibly shorter) batch reach the end of X.
            steps = np.hstack([steps, n_samples])

        for i, j in zip(steps[:-1], steps[1:]):
            batch = X[i:j, :]
            if i == 0:
                # First batch: initialise the running statistics directly.
                incremental_means = batch.mean(axis=0)
                incremental_variances = batch.var(axis=0)
                # Assign this twice so that the test logic is consistent
                incremental_count = batch.shape[0]
                sample_count = batch.shape[0]
            else:
                result = _batch_mean_variance_update(
                    batch, incremental_means, incremental_variances,
                    sample_count)
                (incremental_means, incremental_variances,
                 incremental_count) = result
                sample_count += batch.shape[0]

            # Compare with statistics computed on all samples seen so far.
            calculated_means = np.mean(X[:j], axis=0)
            calculated_variances = np.var(X[:j], axis=0)
            assert_almost_equal(incremental_means, calculated_means, 6)
            assert_almost_equal(incremental_variances,
                                calculated_variances, 6)
            assert_equal(incremental_count, sample_count)


def test_vector_sign_flip():
    """Sign flipping must make the largest-magnitude entry of each row
    positive while only multiplying each row by +/-1."""
    # Testing that sign flip is working & largest value has positive sign
    data = np.random.RandomState(36).randn(5, 5)
    max_abs_rows = np.argmax(np.abs(data), axis=1)
    data_flipped = _deterministic_vector_sign_flip(data)
    max_rows = np.argmax(data_flipped, axis=1)
    assert_array_equal(max_abs_rows, max_rows)
    # Undoing the flip with the recovered signs must reproduce the input.
    signs = np.sign(data[range(data.shape[0]), max_abs_rows])
    assert_array_equal(data, data_flipped * signs[:, np.newaxis])
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/pylab_examples/contour_demo.py | 14 | 3478 | #!/usr/bin/env python
"""
Illustrate simple contour plotting, contours on an image with
a colorbar for the contours, and labelled contours.
See also contour_image.py.
"""
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# Use outward-pointing axis ticks for every figure in this demo.
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'

# Build the sample surface: Z is a difference of two Gaussians evaluated on
# a regular grid over [-3, 3] x [-2, 2].
# NOTE(review): mlab.bivariate_normal was removed from modern matplotlib;
# this demo targets the classic (pre-2.2) API.
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)

# Create a simple contour plot with labels using default colors. The
# inline argument to clabel will control whether the labels are draw
# over the line segments of the contour, removing the lines beneath
# the label
plt.figure()
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('Simplest default with labels')

# contour labels can be placed manually by providing list of positions
# (in data coordinate). See ginput_manual_clabel.py for interactive
# placement.
plt.figure()
CS = plt.contour(X, Y, Z)
manual_locations = [(-1, -1.4), (-0.62, -0.7), (-2, 0.5), (1.7, 1.2), (2.0, 1.4), (2.4, 1.7)]
plt.clabel(CS, inline=1, fontsize=10, manual=manual_locations)
plt.title('labels at selected locations')

# You can force all the contours to be the same color.
plt.figure()
CS = plt.contour(X, Y, Z, 6,
                 colors='k',  # negative contours will be dashed by default
                 )
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Single color - negative contours dashed')

# You can set negative contours to be solid instead of dashed:
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
plt.figure()
CS = plt.contour(X, Y, Z, 6,
                 colors='k',  # negative contours will be dashed by default
                 )
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Single color - negative contours solid')

# And you can manually specify the colors of the contour
plt.figure()
CS = plt.contour(X, Y, Z, 6,
                 linewidths=np.arange(.5, 4, .5),
                 colors=('r', 'green', 'blue', (1,1,0), '#afeeee', '0.5')
                 )
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Crazy lines')

# Or you can use a colormap to specify the colors; the default
# colormap will be used for the contour lines
plt.figure()
im = plt.imshow(Z, interpolation='bilinear', origin='lower',
                cmap=cm.gray, extent=(-3,3,-2,2))
levels = np.arange(-1.2, 1.6, 0.2)
CS = plt.contour(Z, levels,
                 origin='lower',
                 linewidths=2,
                 extent=(-3,3,-2,2))

# Thicken the zero contour.
zc = CS.collections[6]
plt.setp(zc, linewidth=4)

plt.clabel(CS, levels[1::2],  # label every second level
           inline=1,
           fmt='%1.1f',
           fontsize=14)

# make a colorbar for the contour lines
CB = plt.colorbar(CS, shrink=0.8, extend='both')

plt.title('Lines with colorbar')
#plt.hot()  # Now change the colormap for the contour lines and colorbar
plt.flag()

# We can still add a colorbar for the image, too.
CBI = plt.colorbar(im, orientation='horizontal', shrink=0.8)

# This makes the original colorbar look a bit out of place,
# so let's improve its position.
l,b,w,h = plt.gca().get_position().bounds
ll,bb,ww,hh = CB.ax.get_position().bounds
CB.ax.set_position([ll, b+0.1*h, ww, h*0.8])

plt.show()
| mit |
qifeigit/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Two well-separated classes of points in the plane (9 samples).
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
              [1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
# Three interleaved (non-separable) classes over the same points.
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])

# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
               [2, ], [3, ]])

# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
               [2, 0], [3, 0]])

# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])

# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])


def test_qda():
    """QDA fit/predict/predict_proba on the toy datasets above."""
    # QDA classification.
    # This checks that QDA implements fit and predict and returns
    # correct values for a simple toy dataset.
    clf = qda.QDA()
    y_pred = clf.fit(X, y).predict(X)
    assert_array_equal(y_pred, y)

    # Assure that it works with 1D data
    y_pred1 = clf.fit(X1, y).predict(X1)
    assert_array_equal(y_pred1, y)

    # Test probas estimates
    y_proba_pred1 = clf.predict_proba(X1)
    assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
    y_log_proba_pred1 = clf.predict_log_proba(X1)
    assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)

    y_pred3 = clf.fit(X, y3).predict(X)
    # QDA shouldn't be able to separate those
    assert_true(np.any(y_pred3 != y3))

    # Classes should have at least 2 elements
    assert_raises(ValueError, clf.fit, X, y4)


def test_qda_priors():
    """Shifting the class priors towards class 2 must increase the number
    of samples predicted as class 2."""
    clf = qda.QDA()
    y_pred = clf.fit(X, y).predict(X)
    n_pos = np.sum(y_pred == 2)

    neg = 1e-10
    clf = qda.QDA(priors=np.array([neg, 1 - neg]))
    y_pred = clf.fit(X, y).predict(X)
    n_pos2 = np.sum(y_pred == 2)

    assert_greater(n_pos2, n_pos)


def test_qda_store_covariances():
    """covariances_ is only populated when store_covariances=True."""
    # The default is to not set the covariances_ attribute
    clf = qda.QDA().fit(X, y)
    assert_true(not hasattr(clf, 'covariances_'))

    # Test the actual attribute:
    clf = qda.QDA().fit(X, y, store_covariances=True)
    assert_true(hasattr(clf, 'covariances_'))

    assert_array_almost_equal(
        clf.covariances_[0],
        np.array([[0.7, 0.45], [0.45, 0.7]])
    )

    assert_array_almost_equal(
        clf.covariances_[1],
        np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
    )


def test_qda_regularization():
    """Zero-variance features break unregularized QDA; a small reg_param
    restores correct predictions."""
    # the default is reg_param=0. and will cause issues
    # when there is a constant variable
    clf = qda.QDA()
    with ignore_warnings():
        y_pred = clf.fit(X2, y).predict(X2)
    assert_true(np.any(y_pred != y))

    # adding a little regularization fixes the problem
    clf = qda.QDA(reg_param=0.01)
    with ignore_warnings():
        clf.fit(X2, y)
    y_pred = clf.predict(X2)
    assert_array_equal(y_pred, y)

    # Case n_samples_in_a_class < n_features
    clf = qda.QDA(reg_param=0.1)
    with ignore_warnings():
        clf.fit(X5, y5)
    y_pred5 = clf.predict(X5)
    assert_array_equal(y_pred5, y5)
| bsd-3-clause |
jmmease/pandas | pandas/errors/__init__.py | 6 | 1697 | # flake8: noqa
"""
Expose public exceptions & warnings
"""
from pandas._libs.tslib import OutOfBoundsDatetime
class PerformanceWarning(Warning):
    """
    Warning raised when there is a possible
    performance impact.
    """


class UnsupportedFunctionCall(ValueError):
    """
    Exception raised when attempting to call a numpy function
    on a pandas object, but that function is not supported by
    the object e.g. ``np.cumsum(groupby_object)``.
    """


class UnsortedIndexError(KeyError):
    """
    Error raised when attempting to get a slice of a MultiIndex,
    and the index has not been lexsorted. Subclass of `KeyError`.

    .. versionadded:: 0.20.0
    """


class ParserError(ValueError):
    """
    Exception that is raised by an error encountered in `pd.read_csv`.
    """


class DtypeWarning(Warning):
    """
    Warning that is raised for a dtype incompatibility. This
    can happen whenever `pd.read_csv` encounters non-
    uniform dtypes in a column(s) of a given CSV file.
    """


class EmptyDataError(ValueError):
    """
    Exception that is thrown in `pd.read_csv` (by both the C and
    Python engines) when empty data or header is encountered.
    """


class ParserWarning(Warning):
    """
    Warning that is raised in `pd.read_csv` whenever it is necessary
    to change parsers (generally from 'c' to 'python') contrary to the
    one specified by the user due to lack of support or functionality for
    parsing particular attributes of a CSV file with the requested engine.
    """


class MergeError(ValueError):
    """
    Error raised when problems arise during merging due to problems
    with input data. Subclass of `ValueError`.
    """
| bsd-3-clause |
cxhernandez/msmbuilder | msmbuilder/utils/nearest.py | 12 | 6505 | # Author: Matthew Harrigan <matthew.p.harrigan@gmail.com>
# Contributors:
# Copyright (c) 2015, Stanford University and the Authors
# All rights reserved.
from __future__ import absolute_import, print_function, division
from scipy.spatial import KDTree as sp_KDTree
import numpy as np
from . import check_iter_of_sequences
class KDTree(object):
    """kd-tree for quick nearest-neighbor lookup.

    This class provides an index into a set of k-dimensional points which
    can be used to rapidly look up the nearest neighbors of any point.

    This class wraps ``scipy.spatial.KDTree`` by taking a list of arrays
    and returning indices of the form (traj_i, frame_i).

    Parameters
    ----------
    sequences : list of (N, K) array_like
        Each array contains data points to be indexed. This array is not
        copied, and so modifying this data will result in bogus results.
    leafsize : int, optional
        The number of points at which the algorithm switches over to
        brute-force. Has to be positive.

    Raises
    ------
    RuntimeError
        The maximum recursion limit can be exceeded for large data
        sets. If this happens, either increase the value for the `leafsize`
        parameter or increase the recursion limit by::

            >>> import sys
            >>> sys.setrecursionlimit(10000)

    Notes
    -----
    The algorithm used is described in Maneewongvatana and Mount 1999.
    The general idea is that the kd-tree is a binary tree, each of whose
    nodes represents an axis-aligned hyperrectangle. Each node specifies
    an axis and splits the set of points based on whether their coordinate
    along that axis is greater than or less than a particular value.
    During construction, the axis and splitting point are chosen by the
    "sliding midpoint" rule, which ensures that the cells do not all
    become long and thin.

    The tree can be queried for the r closest neighbors of any given point
    (optionally returning only those within some maximum distance of the
    point).

    For large dimensions (20 is already large) do not expect this to run
    significantly faster than brute force. High-dimensional nearest-neighbor
    queries are a substantial open problem in computer science.
    """
    # Subclasses may flip this to allow md Trajectory inputs in
    # check_iter_of_sequences (project-level validation helper).
    _allow_trajectory = False

    def __init__(self, sequences, leafsize=10):
        check_iter_of_sequences(sequences,
                                allow_trajectory=self._allow_trajectory)
        self._kdtree = sp_KDTree(self._concat(sequences), leafsize=leafsize)

    def query(self, x, k=1, p=2, distance_upper_bound=np.inf):
        """Query the kd-tree for nearest neighbors.

        Parameters
        ----------
        x : array_like, last dimension self.m
            An array of points to query.
        k : int, optional
            The number of nearest neighbors to return.
        p : float, 1<=p<=infinity, optional
            Which Minkowski p-norm to use.
            1 is the sum-of-absolute-values "Manhattan" distance
            2 is the usual Euclidean distance
            infinity is the maximum-coordinate-difference distance
        distance_upper_bound : nonnegative float, optional
            Return only neighbors within this distance. This is used to prune
            tree searches, so if you are doing a series of nearest-neighbor
            queries, it may help to supply the distance to the nearest neighbor
            of the most recent point.

        Returns
        -------
        d : float or array of floats
            The distances to the nearest neighbors, sorted nearest first.
            Missing neighbors (e.g. when k > n or distance_upper_bound is
            given) are indicated with infinite distances.
        i : tuple(int, int) or array of tuple(int, int)
            The locations of the neighbors in self.data. Locations are
            given by tuples of (traj_i, frame_i).

        Examples
        --------
        >>> from msmbuilder.utils import KDTree
        >>> X1 = 0.3 * np.random.RandomState(0).randn(500, 2)
        >>> X2 = 0.3 * np.random.RandomState(1).randn(1000, 2) + 10
        >>> tree = KDTree([X1, X2])
        >>> pts = np.array([[0, 0], [10, 10]])
        >>> tree.query(pts)
        (array([ 0.0034,  0.0102]), array([[  0, 410], [  1, 670]]))
        >>> tree.query(pts[0])
        (0.0034, array([  0, 410]))
        """
        # BUGFIX: scipy's KDTree.query signature is
        # (x, k=1, eps=0, p=2, distance_upper_bound=inf).  The previous
        # positional call query(x, k, p, distance_upper_bound) passed `p`
        # into `eps` and `distance_upper_bound` into `p`, silently running
        # an approximate search with the wrong norm.  Pass by keyword so
        # each argument lands on the intended parameter.
        cdists, cinds = self._kdtree.query(
            x, k=k, p=p, distance_upper_bound=distance_upper_bound)
        return cdists, self._split_indices(cinds)

    # concat and split code lovingly copied from MultiSequenceClusterMixin

    def _concat(self, sequences):
        # Record per-sequence lengths so indices into the concatenated
        # array can later be mapped back to (traj_i, frame_i) pairs.
        self.__lengths = [len(s) for s in sequences]
        if len(sequences) > 0 and isinstance(sequences[0], np.ndarray):
            concat = np.ascontiguousarray(np.concatenate(sequences))
        else:
            raise TypeError('sequences must be a list of numpy arrays')

        assert sum(self.__lengths) == len(concat)
        return concat

    def _split(self, concat):
        # Inverse of _concat: carve the concatenated array back into the
        # original per-sequence pieces.
        return [concat[cl - l: cl] for (cl, l) in
                zip(np.cumsum(self.__lengths), self.__lengths)]

    def _split_indices(self, concat_inds):
        """Take indices in 'concatenated space' and return as pairs
        of (traj_i, frame_i)
        """
        clengths = np.append([0], np.cumsum(self.__lengths))
        mapping = np.zeros((clengths[-1], 2), dtype=int)
        for traj_i, (start, end) in enumerate(zip(clengths[:-1], clengths[1:])):
            mapping[start:end, 0] = traj_i
            mapping[start:end, 1] = np.arange(end - start)
        return mapping[concat_inds]
| lgpl-2.1 |
pepo27/PatternRecognition | Python/SegundoParcial/mx/pepo/pattern/perceptron.py | 2 | 3439 | __author__ = 'alberto'
from random import choice
#import matplotlib.pyplot as plt
from numpy import array, dot, random
import numpy as np
import matplotlib.pyplot as plt
def perceptronOR():
    """Train a single perceptron on the OR truth table.

    Python 2 code (``xrange``, print statements).  Training samples are
    drawn at random, so the learned weights differ between runs.
    """
    # Heaviside step used as the activation function.
    unit_step = lambda x: 0 if x < 0 else 1
    # (input with bias term as third component, expected OR output)
    training_data = [
        (array([0,0,1]), 0),
        (array([0,1,1]), 1),
        (array([1,0,1]), 1),
        (array([1,1,1]), 1), ]
    w = random.rand(3)
    errors = []
    eta = 0.2  # learning rate
    n = 100  # number of randomly-sampled training updates
    for i in xrange(n):
        x, expected = choice(training_data)
        result = dot(w, x)
        error = expected - unit_step(result)
        errors.append(error)
        # Standard perceptron rule: move weights along the input vector.
        w += eta * error * x
    for x, _ in training_data:
        result = dot(x, w)
        #print("{}: {} -> {}".format(x[:2], result, unit_step(result)))
    valores = []
    for x in xrange(5):
        valores.append((w[2]*x+w[1]*x+w[0]*x))
    print valores #plt.plot(valores)
    #plt.show()
def dot_product(values, weights):
    """Return the inner product of two equal-length sequences.

    Extra elements in the longer sequence (if any) are ignored, mirroring
    the behavior of ``zip``.
    """
    total = 0
    for v, w in zip(values, weights):
        total += v * w
    return total
def perceptronWIKI():
    """Perceptron learning following the Wikipedia pseudocode: sweep the
    whole training set until one full pass produces no errors.

    Python 2 code (print statement).
    """
    threshold = 0.5
    learning_rate = 0.001
    weights = [0, 0, 0]
    # NOTE(review): the first input component looks like a bias term and the
    # targets resemble OR of the remaining bits -- confirm intended function.
    training_set = [
        ((1, 0, 0), 0),
        ((1, 0, 1), 1),
        ((1, 1, 1), 1),
        ((1, 1, 1), 1)
    ]
    while True:
        #print('-' * 60)
        error_count = 0
        for input_vector, desired_output in training_set:
            #print(weights)
            # `result` is a bool; subtracting it from an int yields -1/0/1.
            result = dot_product(input_vector, weights) > threshold
            error = desired_output - result
            if error != 0:
                error_count += 1
                for index, value in enumerate(input_vector):
                    weights[index] += learning_rate * error * value
        if error_count == 0:
            break
    print weights
def perceptronPractica():
    """Perceptron trained on an 8-row, 4-input truth table (last input is a
    constant bias of 1).  Python 2 code (print statement)."""
    threshold = 0.5
    learning_rate = 0.001
    weights = [1, 1, 1, 1]
    # NOTE(review): the string below is a no-op expression statement holding
    # commented-out data, not a docstring.
    """
    x1 = np.asarray([[0, 1, 1, 1], [0, 0, 0, 1], [0, 1, 0, 0]])
    x2 = np.asarray([[0, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    """
    training_set = [
        ((0, 0, 0 ,1), 0),
        ((1, 0, 1, 1), 0),
        ((1, 0, 0, 1), 0),
        ((1, 1, 0, 1), 0),
        ((0, 0, 1, 1), 1),
        ((0, 1, 1, 1), 1),
        ((0, 1, 0, 1), 1),
        ((1, 1, 1, 1), 1),
    ]
    # Repeat full sweeps until an error-free pass (perceptron convergence).
    while True:
        error_count = 0
        for input_vector, desired_output in training_set:
            #print(weights)
            result = dot_product(input_vector, weights) > threshold
            error = desired_output - result
            if error != 0:
                error_count += 1
                for index, value in enumerate(input_vector):
                    weights[index] += learning_rate * error * value
        if error_count == 0:
            break
    print weights
def perceptronAnd():
    """Train a perceptron on the AND truth table (first input is a constant
    bias of 1), then scatter the 3-D unit-cube corners with matplotlib.

    Python 2 code (print-statement-era matplotlib usage).
    """
    threshold = 0.5
    learning_rate = 0.000001
    weights = [1, 1, 1]
    training_set = [
        ((1, 0, 0), 0),
        ((1, 0, 1), 0),
        ((1, 1, 0), 0),
        ((1, 1, 1), 1)
    ]
    # Sweep the training set until a full error-free pass.
    while True:
        #print('-' * 60)
        error_count = 0
        for input_vector, desired_output in training_set:
            #print(weights)
            result = dot_product(input_vector, weights) > threshold
            error = desired_output - result
            if error != 0:
                error_count += 1
                for index, value in enumerate(input_vector):
                    weights[index] += learning_rate * error * value
        if error_count == 0:
            break
    # Plot the 8 corners of the unit cube as red markers.
    plt3d = plt.figure().gca(projection='3d')
    plt3d.plot([0], [0], [0],c='r',marker='o')
    plt3d.plot([1], [0], [1],c='r',marker='o')
    plt3d.plot([1], [0], [0],c='r',marker='o')
    plt3d.plot([1], [1], [0],c='r',marker='o')
    plt3d.plot([0], [0], [1],c='r',marker='o')
    plt3d.plot([0], [1], [1],c='r',marker='o')
    plt3d.plot([0], [1], [0],c='r',marker='o')
    plt3d.plot([1], [1], [1],c='r',marker='o')
    #plt3d.plot_surface(xx,yy,z2, color='yellow')
    #plt3d.plot_surface(xx,yy,z3, color='cyan')
    plt.show()


# Script entry: only the AND demo runs by default.
#perceptronPractica()
perceptronAnd()
#perceptronWIKI()
#perceptronOR() | gpl-2.0 |
teonlamont/mne-python | mne/decoding/tests/test_ems.py | 2 | 3389 | # Author: Denis A. Engemann <d.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
import pytest
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_version, check_version, run_tests_if_main
from mne.decoding import compute_ems, EMS
# Paths to the small MNE sample recording shipped with the test suite.
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
curdir = op.join(op.dirname(__file__))  # NOTE(review): appears unused here

raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')

# Epoch window and the event-name -> trigger-code mapping used below.
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)


@requires_version('sklearn', '0.15')
def test_ems():
    """Test event-matched spatial filters.

    Exercises error handling, the functional `compute_ems` interface with
    explicit conditions and cross-validation options, and checks the `EMS`
    transformer reproduces `compute_ems` under manual leave-one-out.
    """
    raw = io.read_raw_fif(raw_fname, preload=False)

    # create unequal number of events
    events = read_events(event_name)
    events[-2, 2] = 3
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # Unequal event counts must be rejected until equalized.
    pytest.raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
    epochs.equalize_event_counts(epochs.event_id)

    pytest.raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
    surrogates, filters, conditions = compute_ems(epochs)
    assert_equal(list(set(conditions)), [1, 3])

    events = read_events(event_name)
    event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
    epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs.equalize_event_counts(epochs.event_id)

    n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])

    # With three conditions present, the two to contrast must be explicit.
    pytest.raises(ValueError, compute_ems, epochs)
    surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
    assert_equal(n_expected, len(surrogates))
    assert_equal(n_expected, len(conditions))
    assert_equal(list(set(conditions)), [2, 3])

    # test compute_ems cv
    epochs = epochs['aud_r', 'vis_l']
    epochs.equalize_event_counts(epochs.event_id)
    if check_version('sklearn', '0.18'):
        from sklearn.model_selection import StratifiedKFold
        cv = StratifiedKFold()
    else:
        from sklearn.cross_validation import StratifiedKFold
        cv = StratifiedKFold(epochs.events[:, 2])
    compute_ems(epochs, cv=cv)
    compute_ems(epochs, cv=2)
    pytest.raises(ValueError, compute_ems, epochs, cv='foo')
    pytest.raises(ValueError, compute_ems, epochs, cv=len(epochs) + 1)
    raw.close()

    # EMS transformer, check that identical to compute_ems
    X = epochs.get_data()
    y = epochs.events[:, 2]

    X = X / np.std(X)  # X scaled outside cv in compute_ems
    Xt, coefs = list(), list()
    ems = EMS()
    assert_equal(ems.__repr__(), '<EMS: not fitted.>')
    # manual leave-one-out to avoid sklearn version problem
    for test in range(len(y)):
        train = np.setdiff1d(range(len(y)), np.atleast_1d(test))
        ems.fit(X[train], y[train])
        coefs.append(ems.filters_)
        Xt.append(ems.transform(X[[test]]))
    assert_equal(ems.__repr__(), '<EMS: fitted with 4 filters on 2 classes.>')
    assert_array_almost_equal(filters, np.mean(coefs, axis=0))
    assert_array_almost_equal(surrogates, np.vstack(Xt))


run_tests_if_main()
| bsd-3-clause |
jmetzen/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Show in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
# Binary labels: class 1 where the noiseless signal is positive.
# NOTE(review): np.float is the deprecated alias of the builtin float in
# modern numpy -- fine for the numpy versions this example targets.
y = (X > 0).astype(np.float)
# Stretch the positive side and add noise so the classes overlap slightly.
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)

X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)

# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
    """Logistic sigmoid, ``1 / (1 + exp(-x))``, applied element-wise."""
    return 1 / (np.exp(-x) + 1)
# Logistic-regression probability curve over the test grid.
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)

# Ordinary least squares fit on the same data, for comparison.
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')  # decision threshold at p = 0.5

plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
biocyberman/bcbio-nextgen | bcbio/structural/prioritize.py | 1 | 10138 | """Prioritize structural variants based on biological information.
Provides high level summaries of structural variants in regions of interest,
as defined by the input configuration. Tries to narrow structural variant calls
based on potential biological targets.
"""
import os
import pandas as pd
import toolz as tz
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import bedutils, vcfutils
from bcbio.structural import lumpy
# Optional per-caller post-processing applied before prioritization.
POST_PRIOR_FNS = {"lumpy": lumpy.run_svtyper_prioritize}


def run(items):
    """Prioritize structural variant calls for a single sample.

    For each SV caller that produced a VCF, generates a prioritized tab
    delimited file, combines them into one sorted TSV attached back to the
    sample as an "sv-prioritize" entry, then runs CNV prioritization.
    Requires the `svprioritize` algorithm configuration to be set.
    """
    assert len(items) == 1, ("Expect one input to biological prioritization: %s" %
                             ", ".join([dd.get_sample_name(d) for d in items]))
    data = items[0]
    inputs = []
    for call in data.get("sv", []):
        vcf_file = call.get("vcf_file", call.get("vrn_file"))
        if vcf_file and vcf_file.endswith((".vcf", "vcf.gz")):
            # Replace the post-processing hook with its caller-bound form.
            pp_fn = POST_PRIOR_FNS.get(call["variantcaller"])
            if pp_fn:
                pp_fn = pp_fn(call)
            inputs.append((call["variantcaller"], vcf_file, pp_fn))
    if len(inputs) > 0:
        prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
        if not prioritize_by:
            raise ValueError("Missing structural variant prioritization with `svprioritize`")
        work_dir = _sv_workdir(data)
        priority_files = [_prioritize_vcf(vcaller, vfile, prioritize_by, post_prior_fn, work_dir, data)
                          for vcaller, vfile, post_prior_fn in inputs]
        priority_tsv = _combine_files([xs[0] for xs in priority_files], work_dir, data)
        data["sv"].append({"variantcaller": "sv-prioritize", "vrn_file": priority_tsv,
                           "raw_files": dict(zip([xs[0] for xs in inputs], [xs[1] for xs in priority_files]))})
        data = _cnv_prioritize(data)
    return [data]
def is_gene_list(bed_file):
    """Check if the file is only a list of genes, not a BED.

    Heuristic: decides from the first non-comment line only (single
    whitespace-separated token -> gene list).  Returns None for files that
    contain nothing but comments.
    """
    with utils.open_gzipsafe(bed_file) as in_handle:
        for line in in_handle:
            if not line.startswith("#"):
                if len(line.split()) == 1:
                    return True
                else:
                    return False


def _find_gene_list_from_bed(bed_file, base_file, data):
    """Retrieve list of gene names from input BED file.

    If the input already is a plain gene list, return it as-is; otherwise
    extract the BED name column into a ``<base>-genes.txt`` file.  Returns
    None when no usable gene list could be produced.
    """
    # Check for a gene list, we can just return that.
    if is_gene_list(bed_file):
        return bed_file
    out_file = "%s-genes.txt" % utils.splitext_plus(base_file)[0]
    if not os.path.exists(out_file):
        genes = set([])
        import pybedtools
        for r in pybedtools.BedTool(bed_file):
            if r.name:
                # Skip JSON-like metadata names (start with "{").
                if not r.name.startswith("{"):
                    genes.add(r.name)
        with file_transaction(data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                if len(genes) > 0:
                    out_handle.write("\n".join(sorted(list(genes))) + "\n")
    if utils.file_exists(out_file):
        return out_file
def _prioritize_vcf(caller, vcf_file, prioritize_by, post_prior_fn, work_dir, data):
    """Provide prioritized tab delimited output for a single caller.

    Returns a (tsv_file, simplified_vcf) tuple.  Pipeline: optionally
    subset the VCF to known regions (bcbio-prioritize), simplify the
    annotations (simple_sv_annotation.py), optionally post-process via
    ``post_prior_fn``, then flatten passing non-ref calls to TSV via vawk.
    """
    sample = dd.get_sample_name(data)
    out_file = os.path.join(work_dir, "%s-%s-prioritize.tsv" % (sample, caller))
    simple_vcf = os.path.join(work_dir, "%s-%s-simple.vcf.gz" % (sample, caller))
    if not utils.file_exists(simple_vcf):
        gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
        # If we have a standard gene list we can skip BED based prioritization
        priority_vcf = "%s.vcf.gz" % utils.splitext_plus(out_file)[0]
        if gene_list:
            if vcf_file.endswith(".vcf.gz"):
                utils.symlink_plus(vcf_file, priority_vcf)
            else:
                assert vcf_file.endswith(".vcf")
                utils.symlink_plus(vcf_file, priority_vcf.replace(".vcf.gz", ".vcf"))
                vcfutils.bgzip_and_index(priority_vcf.replace(".vcf.gz", ".vcf"),
                                         data["config"], remove_orig=False)
        # otherwise prioritize based on BED and proceed
        else:
            if not utils.file_exists(priority_vcf):
                with file_transaction(data, priority_vcf) as tx_out_file:
                    resources = config_utils.get_resources("bcbio_prioritize", data["config"])
                    jvm_opts = " ".join(resources.get("jvm_opts", ["-Xms1g", "-Xmx4g"]))
                    export = utils.local_path_export()
                    cmd = ("{export} bcbio-prioritize {jvm_opts} known -i {vcf_file} -o {tx_out_file} "
                           " -k {prioritize_by}")
                    do.run(cmd.format(**locals()), "Prioritize: select in known regions of interest")

        # simple_sv_annotation.py ships its support files next to itself.
        data_dir = os.path.dirname(os.path.realpath(utils.which("simple_sv_annotation.py")))
        with file_transaction(data, simple_vcf) as tx_out_file:
            fusion_file = os.path.join(data_dir, "fusion_pairs.txt")
            opts = ""
            if os.path.exists(fusion_file):
                opts += " --known_fusion_pairs %s" % fusion_file
            if not gene_list:
                opts += " --gene_list %s" % os.path.join(data_dir, "az-cancer-panel.txt")
            else:
                opts += " --gene_list %s" % gene_list
            cmd = "simple_sv_annotation.py {opts} -o - {priority_vcf} | bgzip -c > {tx_out_file}"
            do.run(cmd.format(**locals()), "Prioritize: simplified annotation output")
    simple_vcf = vcfutils.bgzip_and_index(vcfutils.sort_by_ref(simple_vcf, data), data["config"])
    if post_prior_fn:
        simple_vcf = post_prior_fn(simple_vcf, work_dir, data)
    if not utils.file_uptodate(out_file, simple_vcf):
        with file_transaction(data, out_file) as tx_out_file:
            export = utils.local_path_export()
            # vawk: keep PASS/unfiltered, non-homref calls; doubled braces
            # survive str.format as literal awk braces.
            cmd = ("{export} zcat {simple_vcf} | vawk -v SNAME={sample} -v CALLER={caller} "
                   """'{{if (($7 == "PASS" || $7 == ".") && (S${sample}$GT != "0/0")) """
                   "print CALLER,SNAME,$1,$2,I$END,"
                   """I$SVTYPE=="BND" ? I$SVTYPE":"$3":"I$MATEID : I$SVTYPE,"""
                   "I$LOF,I$SIMPLE_ANN,"
                   "S${sample}$SR,S${sample}$PE,S${sample}$PR}}' > {tx_out_file}")
            do.run(cmd.format(**locals()), "Prioritize: convert to tab delimited")
    return out_file, simple_vcf
def _combine_files(tsv_files, work_dir, data):
    """Combine multiple priority tsv files into a final sorted output.

    Concatenates the per-caller TSVs, sorts by chromosome then start
    position, and prepends a fixed header line.
    """
    header = "\t".join(["caller", "sample", "chrom", "start", "end", "svtype",
                        "lof", "annotation", "split_read_support", "paired_support_PE", "paired_support_PR"])
    sample = dd.get_sample_name(data)
    out_file = os.path.join(work_dir, "%s-prioritize.tsv" % (sample))
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            tmpdir = os.path.dirname(tx_out_file)
            input_files = " ".join(tsv_files)
            sort_cmd = bedutils.get_sort_cmd()
            # Doubled braces emit literal shell grouping braces via format().
            cmd = "{{ echo '{header}'; cat {input_files} | {sort_cmd} -T {tmpdir} -k3,3 -k4,4n; }} > {tx_out_file}"
            do.run(cmd.format(**locals()), "Combine prioritized from multiple callers")
    return out_file


def _sv_workdir(data):
    # Per-sample working directory for prioritization outputs.
    return utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
                                           dd.get_sample_name(data), "prioritize"))
# ## CNV prioritization by genes of interest and confidence intervals
def _cnvkit_prioritize(sample, genes, allele_file, metrics_file):
    """Summarize non-diploid calls with copy numbers and confidence intervals.

    Joins CNVkit segment metrics and allele copy-number tables on
    (chromosome, start, end), keeps segments with copy number != 2, and
    adds a ``passes`` flag for tight (< 0.25) log2 confidence intervals.
    """
    mdf = pd.read_table(metrics_file)
    mdf.columns = [x.lower() for x in mdf.columns]
    if len(genes) > 0:
        mdf = mdf[mdf["gene"].str.contains("|".join(genes))]
    mdf = mdf[["chromosome", "start", "end", "gene", "log2", "ci_hi", "ci_lo"]]
    adf = pd.read_table(allele_file)
    if len(genes) > 0:
        adf = adf[adf["gene"].str.contains("|".join(genes))]
    # Allele-specific copy numbers are only present for some inputs.
    if "cn1" in adf.columns and "cn2" in adf.columns:
        adf = adf[["chromosome", "start", "end", "cn", "cn1", "cn2"]]
    else:
        adf = adf[["chromosome", "start", "end", "cn"]]
    df = pd.merge(mdf, adf, on=["chromosome", "start", "end"])
    df = df[df["cn"] != 2]
    if len(df) > 0:
        def passes(row):
            # Tight confidence interval on the log2 ratio -> trustworthy call.
            spread = abs(row["ci_hi"] - row["ci_lo"])
            return spread < 0.25
        df["passes"] = df.apply(passes, axis=1)
    df.insert(0, "sample", [sample] * len(df))
    return df


def _cnv_prioritize(data):
    """Perform confidence interval based prioritization for CNVs.

    Picks the first supported CNV caller with all required input files,
    filters its calls to the `svprioritize` gene list, and writes the
    prioritized table to ``<call_file>-prioritize.tsv``, recorded on the
    call as ``priority``.
    """
    supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
    pcall = None
    priority_files = None
    for call in data.get("sv", []):
        if call["variantcaller"] in supported:
            priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
            priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
            if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
                pcall = call
                break
    prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
    if pcall and prioritize_by:
        out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
        gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
        if gene_list:
            with open(gene_list) as in_handle:
                genes = [x.strip() for x in in_handle]
            args = [dd.get_sample_name(data), genes] + priority_files
            df = supported[pcall["variantcaller"]]["fn"](*args)
            with file_transaction(data, out_file) as tx_out_file:
                df.to_csv(tx_out_file, sep="\t", index=False)
            pcall["priority"] = out_file
    return data
| mit |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/neighbors/tests/test_ball_tree.py | 26 | 10379 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils import check_random_state
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
# Random symmetric positive semi-definite matrix for the mahalanobis metric.
V_mahalanobis = rng.rand(3, 3)
V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)

DIMENSION = 3

# Metric name -> extra keyword arguments needed to instantiate it.
METRICS = {'euclidean': {},
           'manhattan': {},
           'minkowski': dict(p=3),
           'chebyshev': {},
           'seuclidean': dict(V=rng.random_sample(DIMENSION)),
           'wminkowski': dict(p=3, w=rng.random_sample(DIMENSION)),
           'mahalanobis': dict(V=V_mahalanobis)}

# Metrics defined on small-integer-valued data.
DISCRETE_METRICS = ['hamming',
                    'canberra',
                    'braycurtis']

# Metrics defined on 0/1 (boolean) data.
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
                   'rogerstanimoto', 'russellrao', 'sokalmichener',
                   'sokalsneath']
def dist_func(x1, x2, p):
    """Reference Minkowski p-distance between two vectors.

    Note: the difference is raised to ``p`` without taking absolute
    values, matching the original helper's behavior.
    """
    diff = x1 - x2
    return np.sum(diff ** p) ** (1.0 / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-nearest-neighbors: full pairwise distance matrix from
    Y (queries) to X (index points), then per-row argsort.

    Returns (dist, ind) arrays of shape (len(Y), k).
    """
    D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    ind = np.argsort(D, axis=1)[:, :k]
    # Fancy-index the distances matching the selected neighbor columns.
    dist = D[np.arange(Y.shape[0])[:, None], ind]
    return dist, ind
def test_ball_tree_query():
    # Query a BallTree for k nearest neighbors under every supported
    # continuous metric and compare against the brute-force reference.
    rng = check_random_state(0)
    X = rng.random_sample((40, DIMENSION))
    Y = rng.random_sample((10, DIMENSION))
    def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
        bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
        dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
                               breadth_first=breadth_first)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
        # don't check indices here: if there are any duplicate distances,
        # the indices may not match. Distances should not have this problem.
        assert_array_almost_equal(dist1, dist2)
    # nose-style generator test: one sub-test per parameter combination
    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
    # k-NN queries with boolean metrics on 0/1 data, validated against the
    # brute-force reference implementation.
    rng = check_random_state(0)
    X = rng.random_sample((40, 10)).round(0)
    Y = rng.random_sample((10, 10)).round(0)
    k = 5
    def check_neighbors(metric):
        bt = BallTree(X, leaf_size=1, metric=metric)
        dist1, ind1 = bt.query(Y, k)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
        assert_array_almost_equal(dist1, dist2)
    for metric in BOOLEAN_METRICS:
        yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
    # Same check for discrete-valued metrics, on integer data in {0,...,4}.
    rng = check_random_state(0)
    X = (4 * rng.random_sample((40, 10))).round(0)
    Y = (4 * rng.random_sample((10, 10))).round(0)
    k = 5
    def check_neighbors(metric):
        bt = BallTree(X, leaf_size=1, metric=metric)
        dist1, ind1 = bt.query(Y, k)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
        assert_array_almost_equal(dist1, dist2)
    for metric in DISCRETE_METRICS:
        yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
    # query_radius must return exactly the indices whose distance to the
    # query point is <= r, for radii spanning the data's distance range.
    rng = check_random_state(0)
    X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1E-15 # roundoff error can cause test to fail
    bt = BallTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        ind = bt.query_radius([query_pt], r + eps)[0]
        i = np.where(rad <= r + eps)[0]
        ind.sort()
        i.sort()
        assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
    # With return_distance=True the reported distances must match directly
    # computed euclidean distances for the returned indices.
    rng = check_random_state(0)
    X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1E-15 # roundoff error can cause test to fail
    bt = BallTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
        ind = ind[0]
        dist = dist[0]
        d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
        assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
    """Naive O(n*m) reference implementation of a kernel density sum.

    For every row of ``Y``, sums the named kernel (bandwidth ``h``)
    evaluated at the distance to each point in ``X``, scaled by the kernel
    normalization constant.  Ground truth for BallTree.kernel_density.

    Raises ``ValueError`` for an unknown kernel name.
    """
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)
    # Dispatch table of unnormalized kernel sums; only the selected entry
    # is evaluated.  Expressions match the standard kernel definitions.
    kernel_sums = {
        'gaussian': lambda: np.exp(-0.5 * (d * d) / (h * h)).sum(-1),
        'tophat': lambda: (d < h).sum(-1),
        'epanechnikov': lambda: ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1),
        'exponential': lambda: (np.exp(-d / h)).sum(-1),
        'linear': lambda: ((1 - d / h) * (d < h)).sum(-1),
        'cosine': lambda: (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1),
    }
    try:
        unnormalized = kernel_sums[kernel]
    except KeyError:
        raise ValueError('kernel not recognized')
    return norm * unnormalized()
def check_results(kernel, h, atol, rtol, breadth_first, bt, Y, dens_true):
    # Helper: the BallTree KDE estimate must agree with the slow reference
    # within the requested absolute/relative tolerances.
    dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
                             kernel=kernel,
                             breadth_first=breadth_first)
    assert_allclose(dens, dens_true,
                    atol=atol, rtol=max(rtol, 1e-7))
def test_ball_tree_kde(n_samples=100, n_features=3):
    # Exercise kernel_density over every kernel, bandwidth and tolerance
    # combination, in both depth-first and breadth-first traversal.
    rng = check_random_state(0)
    X = rng.random_sample((n_samples, n_features))
    Y = rng.random_sample((n_samples, n_features))
    bt = BallTree(X, leaf_size=10)
    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)
            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first, bt, Y, dens_true)
def test_gaussian_kde(n_samples=1000):
    # Compare gaussian KDE results to scipy.stats.gaussian_kde
    from scipy.stats import gaussian_kde
    rng = check_random_state(0)
    x_in = rng.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)
    for h in [0.01, 0.1, 1]:
        bt = BallTree(x_in[:, None])
        try:
            gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
        except TypeError:
            # older scipy versions reject an explicit bandwidth factor
            raise SkipTest("Old version of scipy, doesn't accept "
                           "explicit bandwidth.")
        # normalize the tree's summed density by n to match gaussian_kde
        dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
        dens_gkde = gkde.evaluate(x_out)
        assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
    # two_point_correlation counts (Y, X) pairs within distance r; compare
    # against counts derived from the full pairwise distance matrix.
    rng = check_random_state(0)
    X = rng.random_sample((n_samples, n_features))
    Y = rng.random_sample((n_samples, n_features))
    r = np.linspace(0, 1, 10)
    bt = BallTree(X, leaf_size=10)
    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    counts_true = [(D <= ri).sum() for ri in r]
    def check_two_point(r, dualtree):
        counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(counts, counts_true)
    for dualtree in (True, False):
        yield check_two_point, r, dualtree
def test_ball_tree_pickle():
    # A pickled-and-restored BallTree (including one using a user-supplied
    # Python metric) must answer queries identically to the original.
    rng = check_random_state(0)
    X = rng.random_sample((10, 3))
    bt1 = BallTree(X, leaf_size=1)
    # Test if BallTree with callable metric is picklable
    bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
    # NOTE(review): BallTree.query returns (distances, indices); the names
    # ind1/dist1 below look swapped, but each comparison still pairs
    # matching quantities, so the test remains valid.
    ind1, dist1 = bt1.query(X)
    ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
    def check_pickle_protocol(protocol):
        s = pickle.dumps(bt1, protocol=protocol)
        bt2 = pickle.loads(s)
        s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
        bt2_pyfunc = pickle.loads(s_pyfunc)
        ind2, dist2 = bt2.query(X)
        ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
        assert_array_almost_equal(ind1, ind2)
        assert_array_almost_equal(dist1, dist2)
        assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
        assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    # Pushing 2*n_nbrs candidates per row must leave the heap holding the
    # n_nbrs smallest distances (and their indices) in sorted order.
    heap = NeighborsHeap(n_pts, n_nbrs)
    for row in range(n_pts):
        d_in = rng.random_sample(2 * n_nbrs).astype(DTYPE)
        i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
        for d, i in zip(d_in, i_in):
            heap.push(row, d, i)
        ind = np.argsort(d_in)
        d_in = d_in[ind]
        i_in = i_in[ind]
        d_heap, i_heap = heap.get_arrays(sort=True)
        assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
        assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    # nodeheap_sort must order values identically to np.argsort.
    vals = rng.random_sample(n_nodes).astype(DTYPE)
    i1 = np.argsort(vals)
    vals2, i2 = nodeheap_sort(vals)
    assert_array_almost_equal(i1, i2)
    assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    # simultaneous_sort sorts each distance row in place while permuting
    # the index rows the same way; compare with a numpy row-wise argsort.
    dist = rng.random_sample((n_rows, n_pts)).astype(DTYPE)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
    dist2 = dist.copy()
    ind2 = ind.copy()
    # simultaneous sort rows using function
    simultaneous_sort(dist, ind)
    # simultaneous sort rows using numpy
    i = np.argsort(dist2, axis=1)
    row_ind = np.arange(n_rows)[:, None]
    dist2 = dist2[row_ind, i]
    ind2 = ind2[row_ind, i]
    assert_array_almost_equal(dist, dist2)
    assert_array_almost_equal(ind, ind2)
def test_query_haversine():
    # Haversine (great-circle) metric on (lat, lon)-style angle pairs:
    # both distances and indices must match the brute-force reference.
    rng = check_random_state(0)
    X = 2 * np.pi * rng.random_sample((40, 2))
    bt = BallTree(X, leaf_size=1, metric='haversine')
    dist1, ind1 = bt.query(X, k=5)
    dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
    assert_array_almost_equal(dist1, dist2)
    assert_array_almost_equal(ind1, ind2)
| mit |
jmikko/fairnessML | zafar_methods/disparate_mistreatment/synthetic_data_demo/fairness_acc_tradeoff.py | 1 | 11588 | import os, sys
import numpy as np
from generate_synthetic_data import *
sys.path.insert(0, '../../fair_classification/') # the code for fair classification is in this directory
import utils as ut
import funcs_disp_mist as fdm
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import plot_syn_boundaries as psb
from copy import deepcopy
import matplotlib.pyplot as plt # for plotting stuff
def test_synthetic_data():
    """Run the Zafar et al. disparate-mistreatment demo on synthetic data.

    Trains an unconstrained logistic regression, then retrains with an FPR
    covariance constraint whose threshold is scaled by a factor m swept
    from 1.0 down to 0.0, and plots per-group FPR and accuracy against m.
    """
    """ Generate the synthetic data """
    data_type = 1
    X, y, x_control = generate_synthetic_data(data_type=data_type,
                                              plot_data=False) # set plot_data to False to skip the data plot
    sensitive_attrs = x_control.keys()
    """ Split the data into train and test """
    train_fold_size = 0.5
    x_train, y_train, x_control_train, x_test, y_test, x_control_test = ut.split_into_train_test(X, y, x_control,
                                                                                                 train_fold_size)
    cons_params = None # constraint parameters, will use them later
    loss_function = "logreg" # perform the experiments with logistic regression
    EPS = 1e-4
    def train_test_classifier():
        # Fit the (possibly constrained) logistic model and collect stats.
        w = fdm.train_model_disp_mist(x_train, y_train, x_control_train, loss_function, EPS, cons_params)
        train_score, test_score, cov_all_train, cov_all_test, s_attr_to_fp_fn_train, s_attr_to_fp_fn_test = fdm.get_clf_stats(
            w, x_train, y_train, x_control_train, x_test, y_test, x_control_test, sensitive_attrs)
        # accuracy and FPR are for the test set because we need them for plotting
        # the covariance is for train, because we need it for setting the thresholds
        return w, test_score, s_attr_to_fp_fn_test, cov_all_train
    """ Classify the data while optimizing for accuracy """
    print("== Unconstrained (original) classifier ==")
    w_uncons, acc_uncons, s_attr_to_fp_fn_test_uncons, cov_all_train_uncons = train_test_classifier()
    print("\n-----------------------------------------------------------------------------------\n")
    """ Now classify such that we optimize for accuracy while achieving perfect fairness """
    print("== Classifier with fairness constraint ==")
    it = 0.05
    mult_range = np.arange(1.0, 0.0 - it, -it).tolist()
    acc_arr = []
    fpr_per_group = {0: [], 1: []}
    fnr_per_group = {0: [], 1: []}
    cons_type = 1 # FPR constraint -- just change the cons_type, the rest of parameters should stay the same
    tau = 5.0
    mu = 1.2
    for m in mult_range:
        # scale every covariance threshold by m (smaller m = tighter constraint)
        sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
        for s_attr in sensitive_attrs_to_cov_thresh.keys():
            for cov_type in sensitive_attrs_to_cov_thresh[s_attr].keys():
                for s_val in sensitive_attrs_to_cov_thresh[s_attr][cov_type]:
                    sensitive_attrs_to_cov_thresh[s_attr][cov_type][s_val] *= m
        cons_params = {"cons_type": cons_type,
                       "tau": tau,
                       "mu": mu,
                       "sensitive_attrs_to_cov_thresh": sensitive_attrs_to_cov_thresh}
        w_cons, acc_cons, s_attr_to_fp_fn_test_cons, cov_all_train_cons = train_test_classifier()
        fpr_per_group[0].append(s_attr_to_fp_fn_test_cons["s1"][0.0]["fpr"])
        fpr_per_group[1].append(s_attr_to_fp_fn_test_cons["s1"][1.0]["fpr"])
        fnr_per_group[0].append(s_attr_to_fp_fn_test_cons["s1"][0.0]["fnr"])
        fnr_per_group[1].append(s_attr_to_fp_fn_test_cons["s1"][1.0]["fnr"])
        acc_arr.append(acc_cons)
    # top panel: per-group FPR vs m; bottom panel: accuracy vs m
    fs = 15
    ax = plt.subplot(2, 1, 1)
    plt.plot(mult_range, fpr_per_group[0], "-o", color="green", label="Group-0")
    plt.plot(mult_range, fpr_per_group[1], "-o", color="blue", label="Group-1")
    ax.set_xlim([max(mult_range), min(mult_range)])
    plt.ylabel('False positive rate', fontsize=fs)
    ax.legend(fontsize=fs)
    ax = plt.subplot(2, 1, 2)
    plt.plot(mult_range, acc_arr, "-o", color="green", label="")
    ax.set_xlim([max(mult_range), min(mult_range)])
    plt.xlabel('Covariance multiplicative factor (m)', fontsize=fs)
    plt.ylabel('Accuracy', fontsize=fs)
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
    plt.savefig("img/fairness_acc_tradeoff_cons_type_%d.png" % cons_type)
    plt.show()
    return
if __name__ == '__main__':
    # test_synthetic_data()
    # Script entry point: load one of four datasets, split into train/test,
    # train a baseline SVM, then apply the Zafar disparate-mistreatment method.
    from load_data import load_binary_diabetes_uci, load_heart_uci, load_breast_cancer, load_adult, load_adult_race
    from sklearn import svm
    from sklearn.metrics import accuracy_score
    import numpy as np
    from measures import equalized_odds_measure_TP, equalized_odds_measure_FP, \
        equalized_odds_measure_TP_from_list_of_sensfeat
    import matplotlib.pyplot as plt
    from sklearn.model_selection import GridSearchCV
    from scipy.optimize import linprog
    from hardt import gamma_y_hat, HardtMethod
    from scipy.spatial import ConvexHull
    from collections import namedtuple
    # experiment_number selects the dataset and its sensitive column index
    experiment_number = 0
    if experiment_number == 0:
        dataset_train = load_binary_diabetes_uci()
        dataset_test = load_binary_diabetes_uci()
        sensible_feature = 1 # sex
    elif experiment_number == 1:
        dataset_train = load_heart_uci()
        dataset_test = load_heart_uci()
        sensible_feature = 1 # sex
    elif experiment_number == 2:
        dataset_train, dataset_test = load_adult(smaller=False)
        sensible_feature = 9 # sex
        print('Different values of the sensible feature', sensible_feature, ':',
              set(dataset_train.data[:, sensible_feature]))
    elif experiment_number == 3:
        dataset_train, dataset_test = load_adult_race(smaller=False)
        sensible_feature = 8 # race
        print('Different values of the sensible feature', sensible_feature, ':',
              set(dataset_train.data[:, sensible_feature]))
    # the UCI loaders return the full set twice: carve a 50/50 split here
    if experiment_number in [0, 1]:
        # % for train
        ntrain = 5 * len(dataset_train.target) // 10
        dataset_train.data = dataset_train.data[:ntrain, :]
        dataset_train.target = dataset_train.target[:ntrain]
        dataset_test.data = dataset_test.data[ntrain:, :]
        dataset_test.target = dataset_test.target[ntrain:]
    if experiment_number in [2, 3]:
        ntrain = len(dataset_test.target)
    # Standard SVM
    # Train an SVM using the training set
    print('Grid search...')
    # grid_search_complete toggles a full C-grid search vs a single fixed C
    grid_search_complete = 0
    if grid_search_complete:
        param_grid = [
            {'C': [0.1, 0.5, 1, 10, 100, 1000], 'kernel': ['linear']},
            # {'C': [0.1, 0.5, 1, 10, 100, 1000], 'gamma': ['auto', 0.001, 0.0001], 'kernel': ['rbf']},
        ]
    else:
        param_grid = [{'C': [10.0], 'kernel': ['linear'], 'gamma': ['auto']}]
    svc = svm.SVC()
    clf = GridSearchCV(svc, param_grid, n_jobs=1)
    clf.fit(dataset_train.data, dataset_train.target)
    print('Y:', clf.best_estimator_)
    # Accuracy
    pred = clf.predict(dataset_test.data)
    pred_train = clf.predict(dataset_train.data)
    print('Accuracy test:', accuracy_score(dataset_test.target, pred))
    print('Accuracy train:', accuracy_score(dataset_train.target, pred_train))
    # Fairness measure (equal opportunity: TP-rate parity across the
    # sensitive-feature groups, for the positive label y=1)
    print('Eq. opp. test: \n',
          equalized_odds_measure_TP(dataset_test, clf, [sensible_feature], ylabel=1))
    print('Eq. opp. train: \n',
          equalized_odds_measure_TP(dataset_train, clf, [sensible_feature], ylabel=1))
    # Zafar method
    """ Generate the synthetic data """
    X, y, x_control = dataset_train.data, dataset_train.target, {"s1": dataset_train.data[:, sensible_feature]}
    sensitive_attrs = x_control.keys()
    """ Split the data into train and test """
    train_fold_size = 0.5
    # BUG(review): the `+` below performs element-wise numpy ADDITION of the
    # two column slices (their shapes generally differ, so this raises or
    # silently broadcasts) -- column-wise concatenation such as np.hstack /
    # np.delete was almost certainly intended to drop the sensitive column.
    # TODO confirm against the upstream Zafar reference implementation.
    x_train, y_train, x_control_train, x_test, y_test, x_control_test = dataset_train.data[:, :sensible_feature] + dataset_train.data[:, sensible_feature+1:], dataset_train.target, {"s1": dataset_train.data[:, sensible_feature]}, \
                                                                        dataset_test.data[:, :sensible_feature] + dataset_test.data[:, sensible_feature + 1:], dataset_test.target, {"s1": dataset_test.data[:, sensible_feature]}
    cons_params = None # constraint parameters, will use them later
    loss_function = "logreg" # perform the experiments with logistic regression
    EPS = 1e-4
    def train_test_classifier():
        # Fit Zafar's disparate-mistreatment-aware logistic regression and
        # report test accuracy, per-group FPR/FNR, and train covariance.
        w = fdm.train_model_disp_mist(x_train, y_train, x_control_train, loss_function, EPS, cons_params)
        train_score, test_score, cov_all_train, cov_all_test, s_attr_to_fp_fn_train, s_attr_to_fp_fn_test = fdm.get_clf_stats(
            w, x_train, y_train, x_control_train, x_test, y_test, x_control_test, sensitive_attrs)
        # accuracy and FPR are for the test set because we need them for plotting
        # the covariance is for train, because we need it for setting the thresholds
        return w, test_score, s_attr_to_fp_fn_test, cov_all_train
    """ Classify the data while optimizing for accuracy """
    print("== Unconstrained (original) classifier ==")
    w_uncons, acc_uncons, s_attr_to_fp_fn_test_uncons, cov_all_train_uncons = train_test_classifier()
    print("\n-----------------------------------------------------------------------------------\n")
    """ Now classify such that we optimize for accuracy while achieving perfect fairness """
    print("== Classifier with fairness constraint ==")
    # sweep a multiplicative factor m over the covariance thresholds: m=1 is
    # the unconstrained covariance, m=0 demands (near) perfect fairness
    it = 0.05
    mult_range = np.arange(1.0, 0.0 - it, -it).tolist()
    acc_arr = []
    fpr_per_group = {0: [], 1: []}
    fnr_per_group = {0: [], 1: []}
    cons_type = 1 # FPR constraint -- just change the cons_type, the rest of parameters should stay the same
    tau = 5.0
    mu = 1.2
    for m in mult_range:
        sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
        for s_attr in sensitive_attrs_to_cov_thresh.keys():
            for cov_type in sensitive_attrs_to_cov_thresh[s_attr].keys():
                for s_val in sensitive_attrs_to_cov_thresh[s_attr][cov_type]:
                    sensitive_attrs_to_cov_thresh[s_attr][cov_type][s_val] *= m
        cons_params = {"cons_type": cons_type,
                       "tau": tau,
                       "mu": mu,
                       "sensitive_attrs_to_cov_thresh": sensitive_attrs_to_cov_thresh}
        w_cons, acc_cons, s_attr_to_fp_fn_test_cons, cov_all_train_cons = train_test_classifier()
        fpr_per_group[0].append(s_attr_to_fp_fn_test_cons["s1"][0.0]["fpr"])
        fpr_per_group[1].append(s_attr_to_fp_fn_test_cons["s1"][1.0]["fpr"])
        fnr_per_group[0].append(s_attr_to_fp_fn_test_cons["s1"][0.0]["fnr"])
        fnr_per_group[1].append(s_attr_to_fp_fn_test_cons["s1"][1.0]["fnr"])
        acc_arr.append(acc_cons)
    # top panel: per-group FPR vs m; bottom panel: accuracy vs m
    fs = 15
    ax = plt.subplot(2, 1, 1)
    plt.plot(mult_range, fpr_per_group[0], "-o", color="green", label="Group-0")
    plt.plot(mult_range, fpr_per_group[1], "-o", color="blue", label="Group-1")
    ax.set_xlim([max(mult_range), min(mult_range)])
    plt.ylabel('False positive rate', fontsize=fs)
    ax.legend(fontsize=fs)
    ax = plt.subplot(2, 1, 2)
    plt.plot(mult_range, acc_arr, "-o", color="green", label="")
    ax.set_xlim([max(mult_range), min(mult_range)])
    plt.xlabel('Covariance multiplicative factor (m)', fontsize=fs)
    plt.ylabel('Accuracy', fontsize=fs)
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
    plt.savefig("img/fairness_acc_tradeoff_cons_type_%d.png" % cons_type)
    plt.show()
| gpl-3.0 |
LiaoPan/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows to reduce drastically the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
    # minimal safe n_components grows ~ log(n_samples) / eps^2
    min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
    plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
    plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
    # Need an internet connection hence not enabled by default
    data = fetch_20newsgroups_vectorized().data[:500]
else:
    data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
      % (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
    t0 = time()
    rp = SparseRandomProjection(n_components=n_components)
    projected_data = rp.fit_transform(data)
    print("Projected %d samples from %d to %d in %0.3fs"
          % (n_samples, n_features, n_components, time() - t0))
    if hasattr(rp, 'components_'):
        n_bytes = rp.components_.data.nbytes
        n_bytes += rp.components_.indices.nbytes
        print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
    projected_dists = euclidean_distances(
        projected_data, squared=True).ravel()[nonzero]
    # 2D density of original vs projected pairwise squared distances
    plt.figure()
    plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
    plt.xlabel("Pairwise squared distances in original space")
    plt.ylabel("Pairwise squared distances in projected space")
    plt.title("Pairwise distances distribution for n_components=%d" %
              n_components)
    cb = plt.colorbar()
    cb.set_label('Sample pairs counts')
    rates = projected_dists / dists
    print("Mean distances rate: %0.2f (%0.2f)"
          % (np.mean(rates), np.std(rates)))
    # NOTE(review): `normed` was removed from matplotlib's hist in favor of
    # `density=True` (matplotlib >= 3.1); kept as-is for the pinned version.
    plt.figure()
    plt.hist(rates, bins=50, normed=True, range=(0., 2.))
    plt.xlabel("Squared distances rate: projected / original")
    plt.ylabel("Distribution of samples pairs")
    plt.title("Histogram of pairwise distance rates for n_components=%d" %
              n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
ChanderG/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
    """Draw one labeled curve with circle markers on the current axes."""
    plt.plot(X, y, marker='o', label=label)
def benchmark(estimator, data):
    """Fit `estimator` on `data`, timing the fit, then measure the mean
    absolute reconstruction error of a transform/inverse_transform round
    trip.  Returns a dict with keys 'time' (seconds) and 'error'."""
    gc.collect()  # avoid GC pauses contaminating the timing
    print("Benching %s" % estimator)
    t0 = time()
    estimator.fit(data)
    training_time = time() - t0
    data_t = estimator.transform(data)
    data_r = estimator.inverse_transform(data_t)
    reconstruction_error = np.mean(np.abs(data - data_r))
    return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
    # Runtime vs. n_components for the three PCA variants.
    plt.figure()
    plot_results(all_components, all_times['pca'], label="PCA")
    plot_results(all_components, all_times['ipca'],
                 label="IncrementalPCA, bsize=%i" % batch_size)
    plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
    plt.legend(loc="upper left")
    plt.suptitle("Algorithm runtime vs. n_components\n \
                 LFW, size %i x %i" % data.shape)
    plt.xlabel("Number of components (out of max %i)" % data.shape[1])
    plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
    # Reconstruction error vs. n_components for the three PCA variants.
    plt.figure()
    plot_results(all_components, all_errors['pca'], label="PCA")
    plot_results(all_components, all_errors['ipca'],
                 label="IncrementalPCA, bsize=%i" % batch_size)
    plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
    plt.legend(loc="lower left")
    plt.suptitle("Algorithm error vs. n_components\n"
                 "LFW, size %i x %i" % data.shape)
    plt.xlabel("Number of components (out of max %i)" % data.shape[1])
    plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
    # Runtime vs. batch size at a fixed number of components.
    plt.figure()
    plot_results(all_batch_sizes, all_times['pca'], label="PCA")
    plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
    plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
    plt.legend(loc="lower left")
    plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
                 LFW, size %i x %i" % (
                 n_features, data.shape[0], data.shape[1]))
    plt.xlabel("Batch size")
    plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
    # Error vs. batch size; RandomizedPCA is omitted here (see the note in
    # variable_batch_size_comparison: its error dwarfs the others).
    plt.figure()
    plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
    plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
    plt.legend(loc="lower left")
    plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
                 LFW, size %i x %i" % (
                 n_features, data.shape[0], data.shape[1]))
    plt.xlabel("Batch size")
    plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
    # Compare PCA / IncrementalPCA / RandomizedPCA runtime and error while
    # varying n_components at a fixed IncrementalPCA batch size.
    all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
                                                       data.shape[1], num=5)]
    batch_size = 1000
    # Compare runtimes and error for fixed batch size
    all_times = defaultdict(list)
    all_errors = defaultdict(list)
    for n_components in all_features:
        pca = PCA(n_components=n_components)
        rpca = RandomizedPCA(n_components=n_components, random_state=1999)
        ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
        results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
                                                               ('ipca', ipca),
                                                               ('rpca', rpca)]}
        for k in sorted(results_dict.keys()):
            all_times[k].append(results_dict[k]['time'])
            all_errors[k].append(results_dict[k]['error'])
    plot_feature_times(all_times, batch_size, all_features, data)
    plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
    # For several n_components values, sweep the IncrementalPCA batch size
    # and compare against flat PCA / RandomizedPCA baselines.
    batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
                                                      data.shape[0], num=10)]
    for n_components in [i.astype(int) for i in
                         np.linspace(data.shape[1] // 10,
                                     data.shape[1], num=4)]:
        all_times = defaultdict(list)
        all_errors = defaultdict(list)
        pca = PCA(n_components=n_components)
        rpca = RandomizedPCA(n_components=n_components, random_state=1999)
        results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
                                                               ('rpca', rpca)]}
        # Create flat baselines to compare the variation over batch size
        all_times['pca'].extend([results_dict['pca']['time']] *
                                len(batch_sizes))
        all_errors['pca'].extend([results_dict['pca']['error']] *
                                 len(batch_sizes))
        all_times['rpca'].extend([results_dict['rpca']['time']] *
                                 len(batch_sizes))
        all_errors['rpca'].extend([results_dict['rpca']['error']] *
                                  len(batch_sizes))
        for batch_size in batch_sizes:
            ipca = IncrementalPCA(n_components=n_components,
                                  batch_size=batch_size)
            results_dict = {k: benchmark(est, data) for k, est in [('ipca',
                                                                    ipca)]}
            all_times['ipca'].append(results_dict['ipca']['time'])
            all_errors['ipca'].append(results_dict['ipca']['error'])
        plot_batch_times(all_times, n_components, batch_sizes, data)
        # RandomizedPCA error is always worse (approx 100x) than other PCA
        # tests
        plot_batch_errors(all_errors, n_components, batch_sizes, data)
# Load the LFW faces, standardize the features, and run both benchmarks.
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
# center and scale each pixel column to zero mean / unit variance
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
pianomania/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
# Metric name -> callable; averaging mode is fixed via functools.partial.
METRICS = {
    'f1': partial(f1_score, average='micro'),
    'f1-by-sample': partial(f1_score, average='samples'),
    'accuracy': accuracy_score,
    'hamming': hamming_loss,
    'jaccard': jaccard_similarity_score,
}
# Format name -> converter from a dense indicator matrix to that format.
FORMATS = {
    'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
    'dense': lambda y: y,
    'csr': lambda y: sp.csr_matrix(y),
    'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs
    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.
    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.
    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.
    classes : array-like of ints (1d or 0d)
        The number of classes in the input.
    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.
    n_times : int
        Time calling the metric n_times times.
    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # normalize all parameters to 1d arrays so the grid can be iterated
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)
    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)
    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # two independently seeded random labelings act as y_true / y_pred
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
    """Print a table of timing results, one row per metric.

    Only the last ([-1]) entry along the samples/classes/density axes
    is displayed.
    """
    # Column widths: each format column is at least 8 characters wide;
    # the metric-name column fits the longest metric name.
    cw = max(max(len(name) for name in formats) + 1, 8)
    fw = max(len(name) for name in metrics)
    header = '{:<{fw}s}' + '{:>{cw}s}' * len(formats)
    body = '{:<{fw}s}' + '{:>{cw}.3f}' * len(formats)
    print(header.format('Metric', *formats, cw=cw, fw=fw))
    last_slice = results[:, :, -1, -1, -1]
    for name, timings in zip(metrics, last_slice):
        print(body.format(name, *timings, cw=cw, fw=fw))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """Plot one timing curve per (metric, format) pair against x_ticks.

    The color encodes the metric and the marker encodes the format.
    """
    fig = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    axes = fig.add_subplot(111)
    for m_idx, metric_name in enumerate(metrics):
        # Cycle through colors when there are more metrics than colors.
        color = metric_colors[m_idx % len(metric_colors)]
        for f_idx, fmt_name in enumerate(formats):
            axes.plot(x_ticks, results[m_idx, f_idx].flat,
                      label='{}, {}'.format(metric_name, fmt_name),
                      marker=format_markers[f_idx],
                      color=color)
    axes.set_xlabel(x_label)
    axes.set_ylabel('Time (s)')
    axes.legend()
    plt.show()
if __name__ == "__main__":
    # Command-line entry point: parse options, run the benchmark, print a
    # table, and optionally plot timings against one varying parameter.
    ap = argparse.ArgumentParser()
    ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
                    help='Specifies metrics to benchmark, defaults to all. '
                         'Choices are: {}'.format(sorted(METRICS)))
    ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
                    help='Specifies multilabel formats to benchmark '
                         '(defaults to all).')
    ap.add_argument('--samples', type=int, default=1000,
                    help='The number of samples to generate')
    ap.add_argument('--classes', type=int, default=10,
                    help='The number of classes')
    ap.add_argument('--density', type=float, default=.2,
                    help='The average density of labels per sample')
    ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
                    default=None,
                    help='Plot time with respect to this parameter varying '
                         'up to the specified value')
    ap.add_argument('--n-steps', default=10, type=int,
                    help='Plot this many points for each metric')
    ap.add_argument('--n-times',
                    default=5, type=int,
                    help="Time performance over n_times trials")
    args = ap.parse_args()

    if args.plot is not None:
        # Replace the scalar value of the plotted parameter with an array
        # of n_steps values ranging up to the requested maximum.
        max_val = getattr(args, args.plot)
        if args.plot in ('classes', 'samples'):
            min_val = 2
        else:
            min_val = 0
        # Drop the first point so every step is strictly above min_val.
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ('classes', 'samples'):
            # These parameters must be integers; dedupe after rounding.
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)

    # NOTE(review): with nargs='*' plus a default, args.metrics is
    # presumably never None -- these guards look purely defensive; confirm.
    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)

    results = benchmark([METRICS[k] for k in args.metrics],
                        [FORMATS[k] for k in args.formats],
                        args.samples, args.classes, args.density,
                        args.n_times)

    _tabulate(results, args.metrics, args.formats)

    if args.plot is not None:
        print('Displaying plot', file=sys.stderr)
        title = ('Multilabel metrics with %s' %
                 ', '.join('{0}={1}'.format(field, getattr(args, field))
                           for field in ['samples', 'classes', 'density']
                           if args.plot != field))
        # `steps` is guaranteed to exist here: it is defined under the
        # same `args.plot is not None` condition above.
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/spyderlib/plugins/externalconsole.py | 2 | 64194 | # -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""External Console plugin"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Qt imports
from spyderlib.qt.QtGui import (QVBoxLayout, QMessageBox, QInputDialog,
QLineEdit, QPushButton, QGroupBox, QLabel,
QTabWidget, QFontComboBox, QHBoxLayout,
QButtonGroup)
from spyderlib.qt.QtCore import SIGNAL, Qt
from spyderlib.qt.compat import getopenfilename
# Stdlib imports
import atexit
import os
import os.path as osp
import sys
import subprocess
from pytank.Core import Settings
# Local imports
from spyderlib.baseconfig import SCIENTIFIC_STARTUP, running_in_mac_app, _
from spyderlib.config import CONF
from spyderlib.utils import programs
from spyderlib.utils.misc import (get_error_match, get_python_executable,
remove_backslashes, is_python_script)
from spyderlib.utils.qthelpers import get_icon, create_action, mimedata2url
from spyderlib.widgets.tabs import Tabs
from spyderlib.widgets.externalshell.pythonshell import ExternalPythonShell
from spyderlib.widgets.externalshell.systemshell import ExternalSystemShell
from spyderlib.widgets.findreplace import FindReplace
from spyderlib.plugins import SpyderPluginWidget, PluginConfigPage
from spyderlib.plugins.runconfig import get_run_configuration
from spyderlib.py3compat import to_text_string, is_text_string, getcwd
from spyderlib import dependencies
MPL_REQVER = '>=1.0'
dependencies.add("matplotlib", _("Interactive data plotting in the consoles"),
required_version=MPL_REQVER)
class ExternalConsoleConfigPage(PluginConfigPage):
    """Configuration page for the External Console plugin.

    Builds the "Console" entry of Spyder's preferences dialog and keeps
    the custom-Python-executable widgets in sync with the selected
    interpreter (Qt API auto-selection, version-compatibility warning).
    """

    def __init__(self, plugin, parent):
        PluginConfigPage.__init__(self, plugin, parent)
        self.get_name = lambda: _("Console")
        # Widgets created later in setup_page(); referenced by the
        # executable-change callbacks below.
        self.cus_exec_radio = None
        self.pyexec_edit = None

    def initialize(self):
        """Connect executable-selection signals once widgets exist."""
        PluginConfigPage.initialize(self)
        self.connect(self.pyexec_edit, SIGNAL("textChanged(QString)"),
                     self.python_executable_changed)
        self.connect(self.cus_exec_radio, SIGNAL("toggled(bool)"),
                     self.python_executable_switched)

    def setup_page(self):
        """Create every option widget and lay them out in four tabs."""
        interface_group = QGroupBox(_("Interface"))
        font_group = self.create_fontgroup(
            option=None, text=None,
            fontfilters=QFontComboBox.MonospacedFonts)
        # Shorthand used for every checkbox created on this page.
        newcb = self.create_checkbox
        singletab_box = newcb(_("One tab per script"), 'single_tab')
        showtime_box = newcb(_("Show elapsed time"), 'show_elapsed_time')
        icontext_box = newcb(_("Show icons and text"), 'show_icontext')

        # Interface Group
        interface_layout = QVBoxLayout()
        interface_layout.addWidget(singletab_box)
        interface_layout.addWidget(showtime_box)
        interface_layout.addWidget(icontext_box)
        interface_group.setLayout(interface_layout)

        # Source Code Group
        display_group = QGroupBox(_("Source code"))
        buffer_spin = self.create_spinbox(
            _("Buffer: "), _(" lines"),
            'max_line_count', min_=0, max_=1000000, step=100,
            tip=_("Set maximum line count"))
        wrap_mode_box = newcb(_("Wrap lines"), 'wrap')
        merge_channels_box = newcb(
            _("Merge process standard output/error channels"),
            'merge_output_channels',
            tip=_("Merging the output channels of the process means that\n"
                  "the standard error won't be written in red anymore,\n"
                  "but this has the effect of speeding up display."))
        colorize_sys_stderr_box = newcb(
            _("Colorize standard error channel using ANSI escape codes"),
            'colorize_sys_stderr',
            tip=_("This method is the only way to have colorized standard\n"
                  "error channel when the output channels have been "
                  "merged."))
        # Colorizing stderr only makes sense when channels are merged, so
        # it follows the merge checkbox for both enabled and checked state.
        self.connect(merge_channels_box, SIGNAL("toggled(bool)"),
                     colorize_sys_stderr_box.setEnabled)
        self.connect(merge_channels_box, SIGNAL("toggled(bool)"),
                     colorize_sys_stderr_box.setChecked)
        colorize_sys_stderr_box.setEnabled(
            self.get_option('merge_output_channels'))
        display_layout = QVBoxLayout()
        display_layout.addWidget(buffer_spin)
        display_layout.addWidget(wrap_mode_box)
        display_layout.addWidget(merge_channels_box)
        display_layout.addWidget(colorize_sys_stderr_box)
        display_group.setLayout(display_layout)

        # Background Color Group
        bg_group = QGroupBox(_("Background color"))
        bg_label = QLabel(_("This option will be applied the next time "
                            "a Python console or a terminal is opened."))
        bg_label.setWordWrap(True)
        lightbg_box = newcb(_("Light background (white color)"),
                            'light_background')
        bg_layout = QVBoxLayout()
        bg_layout.addWidget(bg_label)
        bg_layout.addWidget(lightbg_box)
        bg_group.setLayout(bg_layout)

        # Advanced settings
        source_group = QGroupBox(_("Source code"))
        completion_box = newcb(_("Automatic code completion"),
                               'codecompletion/auto')
        case_comp_box = newcb(_("Case sensitive code completion"),
                              'codecompletion/case_sensitive')
        comp_enter_box = newcb(_("Enter key selects completion"),
                               'codecompletion/enter_key')
        calltips_box = newcb(_("Display balloon tips"), 'calltips')
        source_layout = QVBoxLayout()
        source_layout.addWidget(completion_box)
        source_layout.addWidget(case_comp_box)
        source_layout.addWidget(comp_enter_box)
        source_layout.addWidget(calltips_box)
        source_group.setLayout(source_layout)

        # UMR Group
        umr_group = QGroupBox(_("User Module Reloader (UMR)"))
        umr_label = QLabel(_("UMR forces Python to reload modules which were "
                             "imported when executing a \nscript in the "
                             "external console with the 'runfile' function."))
        umr_enabled_box = newcb(_("Enable UMR"), 'umr/enabled',
                                msg_if_enabled=True, msg_warning=_(
                    "This option will enable the User Module Reloader (UMR) "
                    "in Python/IPython consoles. UMR forces Python to "
                    "reload deeply modules during import when running a "
                    "Python script using the Spyder's builtin function "
                    "<b>runfile</b>."
                    "<br><br><b>1.</b> UMR may require to restart the "
                    "console in which it will be called "
                    "(otherwise only newly imported modules will be "
                    "reloaded when executing scripts)."
                    "<br><br><b>2.</b> If errors occur when re-running a "
                    "PyQt-based program, please check that the Qt objects "
                    "are properly destroyed (e.g. you may have to use the "
                    "attribute <b>Qt.WA_DeleteOnClose</b> on your main "
                    "window, using the <b>setAttribute</b> method)"),
                                )
        umr_verbose_box = newcb(_("Show reloaded modules list"),
                                'umr/verbose', msg_info=_(
                                "Please note that these changes will "
                                "be applied only to new consoles"))
        umr_namelist_btn = QPushButton(
            _("Set UMR excluded (not reloaded) modules"))
        self.connect(umr_namelist_btn, SIGNAL('clicked()'),
                     self.plugin.set_umr_namelist)
        umr_layout = QVBoxLayout()
        umr_layout.addWidget(umr_label)
        umr_layout.addWidget(umr_enabled_box)
        umr_layout.addWidget(umr_verbose_box)
        umr_layout.addWidget(umr_namelist_btn)
        umr_group.setLayout(umr_layout)

        # Python executable Group
        pyexec_group = QGroupBox(_("Python executable"))
        pyexec_bg = QButtonGroup(pyexec_group)
        pyexec_label = QLabel(_("Select the Python interpreter executable "
                                "binary in which Spyder will run scripts:"))
        def_exec_radio = self.create_radiobutton(
            _("Default (i.e. the same as Spyder's)"),
            'pythonexecutable/default',
            button_group=pyexec_bg)
        self.cus_exec_radio = self.create_radiobutton(
            _("Use the following Python interpreter:"),
            'pythonexecutable/custom',
            button_group=pyexec_bg)
        if os.name == 'nt':
            filters = _("Executables") + " (*.exe)"
        else:
            filters = None
        pyexec_file = self.create_browsefile('', 'pythonexecutable',
                                             filters=filters)
        # Locate the line edit bound to the 'pythonexecutable' option so
        # initialize() can connect its textChanged signal.
        for le in self.lineedits:
            if self.lineedits[le][0] == 'pythonexecutable':
                self.pyexec_edit = le
        self.connect(def_exec_radio, SIGNAL("toggled(bool)"),
                     pyexec_file.setDisabled)
        self.connect(self.cus_exec_radio, SIGNAL("toggled(bool)"),
                     pyexec_file.setEnabled)
        pyexec_layout = QVBoxLayout()
        pyexec_layout.addWidget(pyexec_label)
        pyexec_layout.addWidget(def_exec_radio)
        pyexec_layout.addWidget(self.cus_exec_radio)
        pyexec_layout.addWidget(pyexec_file)
        pyexec_group.setLayout(pyexec_layout)

        # PYTHONSTARTUP replacement
        pystartup_group = QGroupBox(_("PYTHONSTARTUP replacement"))
        pystartup_bg = QButtonGroup(pystartup_group)
        pystartup_label = QLabel(_("This option will override the "
                                   "PYTHONSTARTUP environment variable which\n"
                                   "defines the script to be executed during "
                                   "the Python console startup."))
        def_startup_radio = self.create_radiobutton(
            _("Default PYTHONSTARTUP script"),
            'pythonstartup/default',
            button_group=pystartup_bg)
        cus_startup_radio = self.create_radiobutton(
            _("Use the following startup script:"),
            'pythonstartup/custom',
            button_group=pystartup_bg)
        pystartup_file = self.create_browsefile('', 'pythonstartup', '',
                                                filters=_("Python scripts") +
                                                " (*.py)")
        self.connect(def_startup_radio, SIGNAL("toggled(bool)"),
                     pystartup_file.setDisabled)
        self.connect(cus_startup_radio, SIGNAL("toggled(bool)"),
                     pystartup_file.setEnabled)
        pystartup_layout = QVBoxLayout()
        pystartup_layout.addWidget(pystartup_label)
        pystartup_layout.addWidget(def_startup_radio)
        pystartup_layout.addWidget(cus_startup_radio)
        pystartup_layout.addWidget(pystartup_file)
        pystartup_group.setLayout(pystartup_layout)

        # Monitor Group
        monitor_group = QGroupBox(_("Monitor"))
        monitor_label = QLabel(_("The monitor provides introspection "
                                 "features to console: code completion, "
                                 "calltips and variable explorer. "
                                 "Because it relies on several modules, "
                                 "disabling the monitor may be useful "
                                 "to accelerate console startup."))
        monitor_label.setWordWrap(True)
        monitor_box = newcb(_("Enable monitor"), 'monitor/enabled')
        # Introspection checkboxes are useless without the monitor, so they
        # are enabled/disabled together with it.
        for obj in (completion_box, case_comp_box, comp_enter_box,
                    calltips_box):
            self.connect(monitor_box, SIGNAL("toggled(bool)"), obj.setEnabled)
            obj.setEnabled(self.get_option('monitor/enabled'))
        monitor_layout = QVBoxLayout()
        monitor_layout.addWidget(monitor_label)
        monitor_layout.addWidget(monitor_box)
        monitor_group.setLayout(monitor_layout)

        # Qt Group
        opts = [(_("Default library"), 'default'), ('PyQt4', 'pyqt'),
                ('PySide', 'pyside')]
        qt_group = QGroupBox(_("Qt (PyQt/PySide)"))
        qt_setapi_box = self.create_combobox(
            _("Qt-Python bindings library selection:"), opts,
            'qt/api', default='default',
            tip=_("This option will act on<br> "
                  "libraries such as Matplotlib, guidata "
                  "or ETS"))
        if self.get_option('pythonexecutable/default'):
            interpreter = get_python_executable()
        else:
            interpreter = self.get_option('pythonexecutable')
        has_pyqt4 = programs.is_module_installed('PyQt4',
                                                 interpreter=interpreter)
        has_pyside = programs.is_module_installed('PySide',
                                                  interpreter=interpreter)
        if has_pyside and not has_pyqt4:
            self.set_option('qt/api', 'pyside')
        qt_layout = QVBoxLayout()
        qt_layout.addWidget(qt_setapi_box)
        qt_group.setLayout(qt_layout)
        qt_group.setEnabled(has_pyqt4 or has_pyside)

        # PyQt Group
        if has_pyqt4:
            pyqt_group = QGroupBox(_("PyQt"))
            setapi_box = self.create_combobox(
                _("API selection for QString and QVariant objects:"),
                ((_("Default API"), 0), (_("API #1"), 1), (_("API #2"), 2)),
                'pyqt/api_version', default=0,
                tip=_("PyQt API #1 is the default <br>"
                      "API for Python 2. PyQt API #2 is "
                      "the default API for Python 3 and "
                      "is compatible with PySide."))
            ignore_api_box = newcb(_("Ignore API change errors (sip.setapi)"),
                                   'pyqt/ignore_sip_setapi_errors',
                                   tip=_("Enabling this option will ignore <br>"
                                         "errors when changing PyQt API. As "
                                         "PyQt does not support dynamic API "
                                         "changes, it is strongly recommended "
                                         "to use this feature wisely, e.g. "
                                         "for debugging purpose."))
            # API switching requires sip.setapi; grey the widgets out when
            # it is unavailable.
            try:
                from sip import setapi  #analysis:ignore
            except ImportError:
                setapi_box.setDisabled(True)
                ignore_api_box.setDisabled(True)
            pyqt_layout = QVBoxLayout()
            pyqt_layout.addWidget(setapi_box)
            pyqt_layout.addWidget(ignore_api_box)
            pyqt_group.setLayout(pyqt_layout)
            qt_layout.addWidget(pyqt_group)

        # Matplotlib Group
        mpl_group = QGroupBox(_("Matplotlib"))
        mpl_backend_box = newcb('', 'matplotlib/backend/enabled', True)
        mpl_backend_edit = self.create_lineedit(
            _("GUI backend:"),
            'matplotlib/backend/value', "Qt4Agg",
            tip=_("Set the GUI toolkit used by <br>"
                  "Matplotlib to show figures "
                  "(default: Qt4Agg)"),
            alignment=Qt.Horizontal)
        self.connect(mpl_backend_box, SIGNAL("toggled(bool)"),
                     mpl_backend_edit.setEnabled)
        mpl_backend_layout = QHBoxLayout()
        mpl_backend_layout.addWidget(mpl_backend_box)
        mpl_backend_layout.addWidget(mpl_backend_edit)
        mpl_backend_edit.setEnabled(
            self.get_option('matplotlib/backend/enabled'))
        mpl_installed = programs.is_module_installed('matplotlib')
        mpl_layout = QVBoxLayout()
        mpl_layout.addLayout(mpl_backend_layout)
        mpl_group.setLayout(mpl_layout)
        mpl_group.setEnabled(mpl_installed)

        # ETS Group
        ets_group = QGroupBox(_("Enthought Tool Suite"))
        ets_label = QLabel(_("Enthought Tool Suite (ETS) supports "
                             "PyQt4 (qt4) and wxPython (wx) graphical "
                             "user interfaces."))
        ets_label.setWordWrap(True)
        ets_edit = self.create_lineedit(_("ETS_TOOLKIT:"), 'ets_backend',
                                        alignment=Qt.Horizontal)
        ets_layout = QVBoxLayout()
        ets_layout.addWidget(ets_label)
        ets_layout.addWidget(ets_edit)
        ets_group.setLayout(ets_layout)
        ets_group.setEnabled(programs.is_module_installed(
                             "enthought.etsconfig.api",
                             interpreter=interpreter))

        tabs = QTabWidget()
        tabs.addTab(self.create_tab(font_group, interface_group, display_group,
                                    bg_group),
                    _("Display"))
        tabs.addTab(self.create_tab(monitor_group, source_group),
                    _("Introspection"))
        tabs.addTab(self.create_tab(pyexec_group, pystartup_group, umr_group),
                    _("Advanced settings"))
        tabs.addTab(self.create_tab(qt_group, mpl_group, ets_group),
                    _("External modules"))

        vlayout = QVBoxLayout()
        vlayout.addWidget(tabs)
        self.setLayout(vlayout)

    def _auto_change_qt_api(self, pyexec):
        """Change automatically Qt API depending on
        selected Python executable"""
        has_pyqt4 = programs.is_module_installed('PyQt4', interpreter=pyexec)
        has_pyside = programs.is_module_installed('PySide', interpreter=pyexec)
        # NOTE(review): if no combobox is bound to 'qt/api', qt_setapi_cb
        # stays unbound and the lines below raise NameError -- presumably
        # setup_page() always creates it; confirm.
        for cb in self.comboboxes:
            if self.comboboxes[cb][0] == 'qt/api':
                qt_setapi_cb = cb
        if has_pyside and not has_pyqt4:
            qt_setapi_cb.setCurrentIndex(2)
        elif has_pyqt4 and not has_pyside:
            qt_setapi_cb.setCurrentIndex(1)
        else:
            qt_setapi_cb.setCurrentIndex(0)

    def python_executable_changed(self, pyexec):
        """Custom Python executable value has been changed"""
        if not self.cus_exec_radio.isChecked():
            return
        # Qt may hand us a QString; normalize to a Python text string.
        if not is_text_string(pyexec):
            pyexec = to_text_string(pyexec.toUtf8(), 'utf-8')
        old_pyexec = self.get_option("pythonexecutable",
                                     get_python_executable())
        if pyexec != old_pyexec:
            self._auto_change_qt_api(pyexec)
            self.warn_python_compatibility(pyexec)

    def python_executable_switched(self, custom):
        """Python executable default/custom radio button has been toggled"""
        def_pyexec = get_python_executable()
        cust_pyexec = self.pyexec_edit.text()
        if not is_text_string(cust_pyexec):
            cust_pyexec = to_text_string(cust_pyexec.toUtf8(), 'utf-8')
        if def_pyexec != cust_pyexec:
            pyexec = cust_pyexec if custom else def_pyexec
            self._auto_change_qt_api(pyexec)
            if custom:
                self.warn_python_compatibility(cust_pyexec)

    def warn_python_compatibility(self, pyexec):
        """Warn when the selected interpreter's major Python version differs
        from the one Spyder itself is running on."""
        if not osp.isfile(pyexec):
            return
        spyder_version = sys.version_info[0]
        try:
            # Ask the selected interpreter for its major version number.
            cmd = [pyexec, "-c", "import sys; print(sys.version_info[0])"]
            # subprocess.check_output is not present in python2.6 and 3.0
            process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            # NOTE(review): only IOError is caught below -- int() can raise
            # ValueError if the interpreter prints extra output; confirm.
            console_version = int(process.communicate()[0])
        except IOError:
            # Assume compatibility when the interpreter cannot be queried.
            console_version = spyder_version
        if spyder_version != console_version:
            QMessageBox.warning(self, _('Warning'),
                _("You selected a <b>Python %d</b> interpreter for the console "
                  "but Spyder is running on <b>Python %d</b>!.<br><br>"
                  "Although this is possible, we recommend you to install and "
                  "run Spyder directly with your selected interpreter, to avoid "
                  "seeing false warnings and errors due to the incompatible "
                  "syntax between these two Python versions."
                  ) % (console_version, spyder_version), QMessageBox.Ok)
class ExternalConsole(SpyderPluginWidget):
"""
Console widget
"""
CONF_SECTION = 'console'
CONFIGWIDGET_CLASS = ExternalConsoleConfigPage
DISABLE_ACTIONS_WHEN_HIDDEN = False
def __init__(self, parent, light_mode):
SpyderPluginWidget.__init__(self, parent)
self.light_mode = light_mode
self.setMinimumHeight(Settings.External_Console_Min_Height)
self.tabwidget = None
self.menu_actions = None
self.inspector = None # Object inspector plugin
self.historylog = None # History log plugin
self.variableexplorer = None # Variable explorer plugin
self.python_count = 0
self.terminal_count = 0
try:
from sip import setapi #analysis:ignore
except ImportError:
self.set_option('pyqt/ignore_sip_setapi_errors', False)
# Python executable selection (initializing default values as well)
executable = self.get_option('pythonexecutable',
get_python_executable())
if self.get_option('pythonexecutable/default'):
executable = get_python_executable()
# Python startup file selection
if not osp.isfile(self.get_option('pythonstartup', '')):
self.set_option('pythonstartup', SCIENTIFIC_STARTUP)
# default/custom settings are mutually exclusive:
self.set_option('pythonstartup/custom',
not self.get_option('pythonstartup/default'))
if not osp.isfile(executable):
# This is absolutely necessary, in case the Python interpreter
# executable has been moved since last Spyder execution (following
# a Python distribution upgrade for example)
self.set_option('pythonexecutable', get_python_executable())
elif executable.endswith('pythonw.exe'):
# That should not be necessary because this case is already taken
# care of by the `get_python_executable` function but, this was
# implemented too late, so we have to fix it here too, in case
# the Python executable has already been set with pythonw.exe:
self.set_option('pythonexecutable',
executable.replace("pythonw.exe", "python.exe"))
self.shellwidgets = []
self.filenames = []
self.icons = []
self.runfile_args = ""
# Initialize plugin
self.initialize_plugin()
layout = QVBoxLayout()
self.tabwidget = Tabs(self, self.menu_actions)
if hasattr(self.tabwidget, 'setDocumentMode')\
and not sys.platform == 'darwin':
# Don't set document mode to true on OSX because it generates
# a crash when the console is detached from the main window
# Fixes Issue 561
self.tabwidget.setDocumentMode(True)
self.connect(self.tabwidget, SIGNAL('currentChanged(int)'),
self.refresh_plugin)
self.connect(self.tabwidget, SIGNAL('move_data(int,int)'),
self.move_tab)
self.connect(self.main, SIGNAL("pythonpath_changed()"),
self.set_path)
self.tabwidget.set_close_function(self.close_console)
layout.addWidget(self.tabwidget)
# Find/replace widget
self.find_widget = FindReplace(self)
self.find_widget.hide()
self.register_widget_shortcuts("Editor", self.find_widget)
layout.addWidget(self.find_widget)
self.setLayout(layout)
# Accepting drops
self.setAcceptDrops(True)
self.setAutoFillBackground(False)
self.setStyleSheet(Settings.external_Console_Style_Sheet)
def move_tab(self, index_from, index_to):
"""
Move tab (tabs themselves have already been moved by the tabwidget)
"""
filename = self.filenames.pop(index_from)
shell = self.shellwidgets.pop(index_from)
icons = self.icons.pop(index_from)
self.filenames.insert(index_to, filename)
self.shellwidgets.insert(index_to, shell)
self.icons.insert(index_to, icons)
self.emit(SIGNAL('update_plugin_title()'))
def get_shell_index_from_id(self, shell_id):
"""Return shellwidget index from id"""
for index, shell in enumerate(self.shellwidgets):
if id(shell) == shell_id:
return index
def close_console(self, index=None, from_ipyclient=False):
"""Close console tab from index or widget (or close current tab)"""
# Get tab index
if not self.tabwidget.count():
return
if index is None:
index = self.tabwidget.currentIndex()
# Detect what widget we are trying to close
for i, s in enumerate(self.shellwidgets):
if index == i:
shellwidget = s
# If the tab is an IPython kernel, try to detect if it has a client
# connected to it
if shellwidget.is_ipykernel:
ipyclients = self.main.ipyconsole.get_clients()
if ipyclients:
for ic in ipyclients:
if ic.kernel_widget_id == id(shellwidget):
connected_ipyclient = True
break
else:
connected_ipyclient = False
else:
connected_ipyclient = False
# Closing logic
if not shellwidget.is_ipykernel or from_ipyclient or \
not connected_ipyclient:
self.tabwidget.widget(index).close()
self.tabwidget.removeTab(index)
self.filenames.pop(index)
self.shellwidgets.pop(index)
self.icons.pop(index)
self.emit(SIGNAL('update_plugin_title()'))
else:
QMessageBox.question(self, _('Trying to kill a kernel?'),
_("You can't close this kernel because it has one or more "
"consoles connected to it.<br><br>"
"You need to close them instead or you can kill the kernel "
"using the second button from right to left."),
QMessageBox.Ok)
def set_variableexplorer(self, variableexplorer):
"""Set variable explorer plugin"""
self.variableexplorer = variableexplorer
def set_path(self):
"""Set consoles PYTHONPATH if changed by the user"""
from spyderlib.widgets.externalshell import pythonshell
for sw in self.shellwidgets:
if isinstance(sw, pythonshell.ExternalPythonShell):
if sw.is_interpreter and sw.is_running():
sw.path = self.main.get_spyder_pythonpath()
sw.shell.path = sw.path
def __find_python_shell(self, interpreter_only=False):
current_index = self.tabwidget.currentIndex()
if current_index == -1:
return
from spyderlib.widgets.externalshell import pythonshell
for index in [current_index]+list(range(self.tabwidget.count())):
shellwidget = self.tabwidget.widget(index)
if isinstance(shellwidget, pythonshell.ExternalPythonShell):
if interpreter_only and not shellwidget.is_interpreter:
continue
elif not shellwidget.is_running():
continue
else:
self.tabwidget.setCurrentIndex(index)
return shellwidget
def get_current_shell(self):
"""
Called by object inspector to retrieve the current shell instance
"""
shellwidget = self.__find_python_shell()
return shellwidget.shell
def get_running_python_shell(self):
"""
Called by object inspector to retrieve a running Python shell instance
"""
current_index = self.tabwidget.currentIndex()
if current_index == -1:
return
from spyderlib.widgets.externalshell import pythonshell
shellwidgets = [self.tabwidget.widget(index)
for index in range(self.tabwidget.count())]
shellwidgets = [_w for _w in shellwidgets
if isinstance(_w, pythonshell.ExternalPythonShell) \
and _w.is_running()]
if shellwidgets:
# First, iterate on interpreters only:
for shellwidget in shellwidgets:
if shellwidget.is_interpreter:
return shellwidget.shell
else:
return shellwidgets[0].shell
def run_script_in_current_shell(self, filename, wdir, args, debug):
"""Run script in current shell, if any"""
norm = lambda text: remove_backslashes(to_text_string(text))
line = "%s('%s'" % ('debugfile' if debug else 'runfile',
norm(filename))
if args:
line += ", args='%s'" % norm(args)
if wdir:
line += ", wdir='%s'" % norm(wdir)
line += ")"
if not self.execute_python_code(line, interpreter_only=True):
QMessageBox.warning(self, _('Warning'),
_("No Python console is currently selected to run <b>%s</b>."
"<br><br>Please select or open a new Python console "
"and try again."
) % osp.basename(norm(filename)), QMessageBox.Ok)
else:
self.visibility_changed(True)
self.raise_()
def set_current_shell_working_directory(self, directory):
"""Set current shell working directory"""
shellwidget = self.__find_python_shell()
if shellwidget is not None:
shellwidget.shell.set_cwd(to_text_string(directory))
def execute_python_code(self, lines, interpreter_only=False):
"""Execute Python code in an already opened Python interpreter"""
shellwidget = self.__find_python_shell(
interpreter_only=interpreter_only)
if (shellwidget is not None) and (not shellwidget.is_ipykernel):
shellwidget.shell.execute_lines(to_text_string(lines))
self.activateWindow()
shellwidget.shell.setFocus()
return True
else:
return False
def pdb_has_stopped(self, fname, lineno, shellwidget):
"""Python debugger has just stopped at frame (fname, lineno)"""
# This is a unique form of the edit_goto signal that is intended to
# prevent keyboard input from accidentally entering the editor
# during repeated, rapid entry of debugging commands.
self.emit(SIGNAL("edit_goto(QString,int,QString,bool)"),
fname, lineno, '', False)
if shellwidget.is_ipykernel:
# Focus client widget, not kernel
ipw = self.main.ipyconsole.get_focus_widget()
self.main.ipyconsole.activateWindow()
ipw.setFocus()
else:
self.activateWindow()
shellwidget.shell.setFocus()
def set_spyder_breakpoints(self):
"""Set all Spyder breakpoints into all shells"""
for shellwidget in self.shellwidgets:
shellwidget.shell.set_spyder_breakpoints()
def start(self, fname, wdir=None, args='', interact=False, debug=False,
python=True, ipykernel=False, ipyclient=None,
give_ipyclient_focus=True, python_args=''):
"""
Start new console
fname:
string: filename of script to run
None: open an interpreter
wdir: working directory
args: command line options of the Python script
interact: inspect script interactively after its execution
debug: run pdb
python: True: Python interpreter, False: terminal
ipykernel: True: IPython kernel
ipyclient: True: Automatically create an IPython client
python_args: additionnal Python interpreter command line options
(option "-u" is mandatory, see widgets.externalshell package)
"""
# Note: fname is None <=> Python interpreter
if fname is not None and not is_text_string(fname):
fname = to_text_string(fname)
if wdir is not None and not is_text_string(wdir):
wdir = to_text_string(wdir)
if fname is not None and fname in self.filenames:
index = self.filenames.index(fname)
if self.get_option('single_tab'):
old_shell = self.shellwidgets[index]
if old_shell.is_running():
runconfig = get_run_configuration(fname)
if runconfig is None or runconfig.show_kill_warning:
answer = QMessageBox.question(self, self.get_plugin_title(),
_("%s is already running in a separate process.\n"
"Do you want to kill the process before starting "
"a new one?") % osp.basename(fname),
QMessageBox.Yes | QMessageBox.Cancel)
else:
answer = QMessageBox.Yes
if answer == QMessageBox.Yes:
old_shell.process.kill()
old_shell.process.waitForFinished()
else:
return
self.close_console(index)
else:
index = self.tabwidget.count()
# Creating a new external shell
pythonpath = self.main.get_spyder_pythonpath()
light_background = self.get_option('light_background')
show_elapsed_time = self.get_option('show_elapsed_time')
if python:
if self.get_option('pythonexecutable/default'):
pythonexecutable = get_python_executable()
else:
pythonexecutable = self.get_option('pythonexecutable')
if self.get_option('pythonstartup/default') or ipykernel:
pythonstartup = None
else:
pythonstartup = self.get_option('pythonstartup', None)
monitor_enabled = self.get_option('monitor/enabled')
if self.get_option('matplotlib/backend/enabled'):
mpl_backend = self.get_option('matplotlib/backend/value')
else:
mpl_backend = None
ets_backend = self.get_option('ets_backend')
qt_api = self.get_option('qt/api')
if qt_api not in ('pyqt', 'pyside'):
qt_api = None
pyqt_api = self.get_option('pyqt/api_version')
ignore_sip_setapi_errors = self.get_option(
'pyqt/ignore_sip_setapi_errors')
merge_output_channels = self.get_option('merge_output_channels')
colorize_sys_stderr = self.get_option('colorize_sys_stderr')
umr_enabled = self.get_option('umr/enabled')
umr_namelist = self.get_option('umr/namelist')
umr_verbose = self.get_option('umr/verbose')
ar_timeout = CONF.get('variable_explorer', 'autorefresh/timeout')
ar_state = CONF.get('variable_explorer', 'autorefresh')
# CRUCIAL NOTE FOR IPYTHON KERNELS:
# autorefresh needs to be on so that our monitor
# can find __ipythonkernel__ in the globals namespace
# *after* the kernel has been started.
# Without the ns refresh provided by autorefresh, a
# client is *never* started (although the kernel is)
# Fix Issue 1595
if not ar_state and ipykernel:
ar_state = True
if self.light_mode:
from spyderlib.plugins.variableexplorer import VariableExplorer
sa_settings = VariableExplorer.get_settings()
else:
sa_settings = None
shellwidget = ExternalPythonShell(self, fname, wdir,
interact, debug, path=pythonpath,
python_args=python_args,
ipykernel=ipykernel,
arguments=args, stand_alone=sa_settings,
pythonstartup=pythonstartup,
pythonexecutable=pythonexecutable,
umr_enabled=umr_enabled, umr_namelist=umr_namelist,
umr_verbose=umr_verbose, ets_backend=ets_backend,
monitor_enabled=monitor_enabled,
mpl_backend=mpl_backend,
qt_api=qt_api, pyqt_api=pyqt_api,
ignore_sip_setapi_errors=ignore_sip_setapi_errors,
merge_output_channels=merge_output_channels,
colorize_sys_stderr=colorize_sys_stderr,
autorefresh_timeout=ar_timeout,
autorefresh_state=ar_state,
light_background=light_background,
menu_actions=self.menu_actions,
show_buttons_inside=False,
show_elapsed_time=show_elapsed_time)
self.connect(shellwidget, SIGNAL('pdb(QString,int)'),
lambda fname, lineno, shellwidget=shellwidget:
self.pdb_has_stopped(fname, lineno, shellwidget))
self.register_widget_shortcuts("Console", shellwidget.shell)
else:
if os.name == 'posix':
cmd = 'gnome-terminal'
args = []
if programs.is_program_installed(cmd):
if wdir:
args.extend(['--working-directory=%s' % wdir])
programs.run_program(cmd, args)
return
cmd = 'konsole'
if programs.is_program_installed(cmd):
if wdir:
args.extend(['--workdir', wdir])
programs.run_program(cmd, args)
return
shellwidget = ExternalSystemShell(self, wdir, path=pythonpath,
light_background=light_background,
menu_actions=self.menu_actions,
show_buttons_inside=False,
show_elapsed_time=show_elapsed_time)
# Code completion / calltips
shellwidget.shell.setMaximumBlockCount(
self.get_option('max_line_count') )
shellwidget.shell.set_font( self.get_plugin_font() )
shellwidget.shell.toggle_wrap_mode( self.get_option('wrap') )
shellwidget.shell.set_calltips( self.get_option('calltips') )
shellwidget.shell.set_codecompletion_auto(
self.get_option('codecompletion/auto') )
shellwidget.shell.set_codecompletion_case(
self.get_option('codecompletion/case_sensitive') )
shellwidget.shell.set_codecompletion_enter(
self.get_option('codecompletion/enter_key') )
if python and self.inspector is not None:
shellwidget.shell.set_inspector(self.inspector)
shellwidget.shell.set_inspector_enabled(
CONF.get('inspector', 'connect/python_console'))
if self.historylog is not None:
self.historylog.add_history(shellwidget.shell.history_filename)
self.connect(shellwidget.shell,
SIGNAL('append_to_history(QString,QString)'),
self.historylog.append_to_history)
self.connect(shellwidget.shell, SIGNAL("go_to_error(QString)"),
self.go_to_error)
self.connect(shellwidget.shell, SIGNAL("focus_changed()"),
lambda: self.emit(SIGNAL("focus_changed()")))
if python:
if self.main.editor is not None:
self.connect(shellwidget, SIGNAL('open_file(QString,int)'),
self.open_file_in_spyder)
if fname is None:
if ipykernel:
# Connect client to any possible error while starting the
# kernel
ipyclient.connect(shellwidget,
SIGNAL("ipython_kernel_start_error(QString)"),
lambda error: ipyclient.show_kernel_error(error))
# Detect if kernel and frontend match or not
# Don't apply this for our Mac app because it's
# failing, see Issue 2006
if self.get_option('pythonexecutable/custom') and \
not running_in_mac_app():
frontend_ver = programs.get_module_version('IPython')
old_vers = ['1', '2']
if any([frontend_ver.startswith(v) for v in old_vers]):
frontend_ver = '<3.0'
else:
frontend_ver = '>=3.0'
pyexec = self.get_option('pythonexecutable')
kernel_and_frontend_match = \
programs.is_module_installed('IPython',
version=frontend_ver,
interpreter=pyexec)
else:
kernel_and_frontend_match = True
# Create a a kernel tab only if frontend and kernel
# versions match
if kernel_and_frontend_match:
tab_name = _("Kernel")
tab_icon1 = get_icon('ipython_console.png')
tab_icon2 = get_icon('ipython_console_t.png')
self.connect(shellwidget,
SIGNAL('create_ipython_client(QString)'),
lambda cf: self.register_ipyclient(cf,
ipyclient,
shellwidget,
give_focus=give_ipyclient_focus))
else:
shellwidget.emit(
SIGNAL("ipython_kernel_start_error(QString)"),
_("Either:"
"<ol>"
"<li>Your IPython frontend and kernel versions "
"are <b>incompatible</b> or</li>"
"<li>You <b>don't have</b> IPython installed in "
"your external interpreter.</li>"
"</ol>"
"In any case, we're sorry but we can't create a "
"console for you."))
shellwidget.deleteLater()
shellwidget = None
return
else:
self.python_count += 1
tab_name = "Python %d" % self.python_count
tab_icon1 = get_icon('python.png')
tab_icon2 = get_icon('python_t.png')
else:
tab_name = osp.basename(fname)
tab_icon1 = get_icon('run.png')
tab_icon2 = get_icon('terminated.png')
else:
fname = id(shellwidget)
if os.name == 'nt':
tab_name = _("Command Window")
else:
tab_name = _("Terminal")
self.terminal_count += 1
tab_name += (" %d" % self.terminal_count)
tab_icon1 = get_icon('cmdprompt.png')
tab_icon2 = get_icon('cmdprompt_t.png')
self.shellwidgets.insert(index, shellwidget)
self.filenames.insert(index, fname)
self.icons.insert(index, (tab_icon1, tab_icon2))
if index is None:
index = self.tabwidget.addTab(shellwidget, tab_name)
else:
self.tabwidget.insertTab(index, shellwidget, tab_name)
self.connect(shellwidget, SIGNAL("started()"),
lambda sid=id(shellwidget): self.process_started(sid))
self.connect(shellwidget, SIGNAL("finished()"),
lambda sid=id(shellwidget): self.process_finished(sid))
self.find_widget.set_editor(shellwidget.shell)
self.tabwidget.setTabToolTip(index, fname if wdir is None else wdir)
self.tabwidget.setCurrentIndex(index)
if self.dockwidget and not self.ismaximized and not ipykernel:
self.dockwidget.setVisible(True)
self.dockwidget.raise_()
shellwidget.set_icontext_visible(self.get_option('show_icontext'))
# Start process and give focus to console
shellwidget.start_shell()
if not ipykernel:
self.activateWindow()
shellwidget.shell.setFocus()
def set_ipykernel_attrs(self, connection_file, kernel_widget, name):
    """Attach kernel metadata to an IPython kernel tab.

    Stores the connection file on the kernel widget, schedules the
    connection file for deletion at Spyder exit, and renames the tab
    after the client master name.
    """
    # Set connection file
    kernel_widget.connection_file = connection_file
    # If we've reached this point then it's safe to assume IPython
    # is available, and this import should be valid.
    from IPython.core.application import get_ipython_dir
    # For each kernel we launch, setup to delete the associated
    # connection file at the time Spyder exits.
    def cleanup_connection_file(connection_file):
        # The file lives in the default profile's 'security' directory;
        # it may already be gone by exit time, hence the tolerant except.
        connection_file = osp.join(get_ipython_dir(), 'profile_default',
                                   'security', connection_file)
        try:
            os.remove(connection_file)
        except OSError:
            pass
    atexit.register(cleanup_connection_file, connection_file)
    # Set tab name according to client master name
    index = self.get_shell_index_from_id(id(kernel_widget))
    tab_name = _("Kernel %s") % name
    self.tabwidget.setTabText(index, tab_name)
def register_ipyclient(self, connection_file, ipyclient, kernel_widget,
                       give_focus=True):
    """
    Register `ipyclient` to be connected to `kernel_widget`
    """
    # Check if our client already has a connection_file and kernel_widget_id
    # which means that we are asking for a kernel restart
    if ipyclient.connection_file is not None \
       and ipyclient.kernel_widget_id is not None:
        restart_kernel = True
    else:
        restart_kernel = False
    # Setting kernel widget attributes.
    # NOTE(review): the client name appears to be "<master>/<counter>";
    # only the part before '/' is used to title the kernel tab -- confirm
    # against the IPython console plugin.
    name = ipyclient.name.split('/')[0]
    self.set_ipykernel_attrs(connection_file, kernel_widget, name)
    # Creating the client
    ipyconsole = self.main.ipyconsole
    ipyclient.connection_file = connection_file
    ipyclient.kernel_widget_id = id(kernel_widget)
    ipyconsole.register_client(ipyclient, restart=restart_kernel,
                               give_focus=give_focus)
def open_file_in_spyder(self, fname, lineno):
    """Open file in Spyder's editor from remote process"""
    editor = self.main.editor
    editor.activateWindow()
    editor.raise_()
    editor.load(fname, lineno)
#------ Private API -------------------------------------------------------
def process_started(self, shell_id):
    """React to a shell process having started.

    Switches the tab icon to its "running" variant and plugs the shell
    into the object inspector and variable explorer, when available.
    """
    idx = self.get_shell_index_from_id(shell_id)
    shellwidget = self.shellwidgets[idx]
    running_icon = self.icons[idx][0]
    self.tabwidget.setTabIcon(idx, running_icon)
    if self.inspector is not None:
        self.inspector.set_shell(shellwidget.shell)
    if self.variableexplorer is not None:
        self.variableexplorer.add_shellwidget(shellwidget)
def process_finished(self, shell_id):
    """React to a shell process having terminated."""
    idx = self.get_shell_index_from_id(shell_id)
    if idx is not None:
        # Not sure why it happens, but sometimes the shellwidget has
        # already been removed, so that's not bad if we can't change
        # the tab icon...
        terminated_icon = self.icons[idx][1]
        self.tabwidget.setTabIcon(idx, terminated_icon)
    if self.variableexplorer is not None:
        self.variableexplorer.remove_shellwidget(shell_id)
#------ SpyderPluginWidget API --------------------------------------------
def get_plugin_title(self):
    """Return widget title"""
    base = _('Console')
    if not self.filenames:
        return base
    current = self.filenames[self.tabwidget.currentIndex()]
    if current:
        return base + ' - ' + to_text_string(current)
    return base
def get_plugin_icon(self):
    """Return widget icon"""
    plugin_icon = get_icon('console.png')
    return plugin_icon
def get_focus_widget(self):
    """
    Return the widget to give focus to when
    this plugin's dockwidget is raised on top-level
    """
    current_console = self.tabwidget.currentWidget()
    return current_console
def get_plugin_actions(self):
    """Return a list of actions related to plugin"""
    # Action to open a plain Python interpreter console.
    interpreter_action = create_action(self,
                        _("Open a &Python console"), None,
                        'python.png', triggered=self.open_interpreter)
    # Action to open the platform's system shell: a command prompt on
    # Windows, a terminal elsewhere.
    if os.name == 'nt':
        text = _("Open &command prompt")
        tip = _("Open a Windows command prompt")
    else:
        text = _("Open a &terminal")
        tip = _("Open a terminal window")
    terminal_action = create_action(self, text, None, None, tip,
                                    triggered=self.open_terminal)
    run_action = create_action(self,
                        _("&Run..."), None,
                        'run_small.png', _("Run a Python script"),
                        triggered=self.run_script)
    # Distribute actions between this plugin's own menu and the main
    # window's "Consoles" and "Tools" menus.
    consoles_menu_actions = [interpreter_action]
    tools_menu_actions = [terminal_action]
    self.menu_actions = [interpreter_action, terminal_action, run_action]
    self.main.consoles_menu_actions += consoles_menu_actions
    self.main.tools_menu_actions += tools_menu_actions
    return self.menu_actions+consoles_menu_actions+tools_menu_actions
def register_plugin(self):
    """Register plugin in Spyder's main window"""
    if self.main.light:
        # Light mode: the console is the main window's central widget.
        self.main.setCentralWidget(self)
        self.main.widgetlist.append(self)
    else:
        self.main.add_dockwidget(self)
        self.inspector = self.main.inspector
        if self.inspector is not None:
            self.inspector.set_external_console(self)
        self.historylog = self.main.historylog
        # Editor integration: follow tracebacks back to source, run
        # scripts/selections, and sync breakpoints.
        self.connect(self, SIGNAL("edit_goto(QString,int,QString)"),
                     self.main.editor.load)
        self.connect(self, SIGNAL("edit_goto(QString,int,QString,bool)"),
                     lambda fname, lineno, word, processevents:
                     self.main.editor.load(fname, lineno, word,
                                           processevents=processevents))
        self.connect(self.main.editor,
                     SIGNAL('run_in_current_extconsole(QString,QString,QString,bool)'),
                     self.run_script_in_current_shell)
        self.connect(self.main.editor,
                     SIGNAL("breakpoints_saved()"),
                     self.set_spyder_breakpoints)
        self.connect(self.main.editor, SIGNAL("open_dir(QString)"),
                     self.set_current_shell_working_directory)
        self.connect(self.main.workingdirectory,
                     SIGNAL("set_current_console_wd(QString)"),
                     self.set_current_shell_working_directory)
        self.connect(self, SIGNAL('focus_changed()'),
                     self.main.plugin_focus_changed)
        self.connect(self, SIGNAL('redirect_stdio(bool)'),
                     self.main.redirect_internalshell_stdio)
        # Let the file and project explorers open consoles rooted at a
        # chosen directory.
        expl = self.main.explorer
        if expl is not None:
            self.connect(expl, SIGNAL("open_terminal(QString)"),
                         self.open_terminal)
            self.connect(expl, SIGNAL("open_interpreter(QString)"),
                         self.open_interpreter)
        pexpl = self.main.projectexplorer
        if pexpl is not None:
            self.connect(pexpl, SIGNAL("open_terminal(QString)"),
                         self.open_terminal)
            self.connect(pexpl, SIGNAL("open_interpreter(QString)"),
                         self.open_interpreter)
def closing_plugin(self, cancelable=False):
    """Perform actions before parent main window is closed"""
    # Close every open console; never veto the window close.
    for console_widget in self.shellwidgets:
        console_widget.close()
    return True
def refresh_plugin(self):
    """Refresh tabwidget"""
    shellwidget = None
    if self.tabwidget.count():
        # A console is open: focus its editor and rebuild the corner
        # toolbar (time label + shell buttons; the bare 5 values are
        # pixel spacers consumed by set_corner_widgets).
        shellwidget = self.tabwidget.currentWidget()
        editor = shellwidget.shell
        editor.setFocus()
        widgets = [shellwidget.create_time_label(), 5
                   ]+shellwidget.get_toolbar_buttons()+[5]
    else:
        editor = None
        widgets = []
    self.find_widget.set_editor(editor)
    self.tabwidget.set_corner_widgets({Qt.TopRightCorner: widgets})
    if shellwidget:
        shellwidget.update_time_label_visibility()
    self.main.last_console_plugin_focus_was_python = True
    self.emit(SIGNAL('update_plugin_title()'))
def apply_plugin_settings(self, options):
    """Apply configuration file's plugin settings"""
    # Snapshot (option name, current value) pairs once, then push each
    # changed option (those listed in `options`) to every open console.
    font_n = 'plugin_font'
    font_o = self.get_plugin_font()
    showtime_n = 'show_elapsed_time'
    showtime_o = self.get_option(showtime_n)
    icontext_n = 'show_icontext'
    icontext_o = self.get_option(icontext_n)
    calltips_n = 'calltips'
    calltips_o = self.get_option(calltips_n)
    inspector_n = 'connect_to_oi'
    inspector_o = CONF.get('inspector', 'connect/python_console')
    wrap_n = 'wrap'
    wrap_o = self.get_option(wrap_n)
    compauto_n = 'codecompletion/auto'
    compauto_o = self.get_option(compauto_n)
    case_comp_n = 'codecompletion/case_sensitive'
    case_comp_o = self.get_option(case_comp_n)
    compenter_n = 'codecompletion/enter_key'
    compenter_o = self.get_option(compenter_n)
    mlc_n = 'max_line_count'
    mlc_o = self.get_option(mlc_n)
    for shellwidget in self.shellwidgets:
        if font_n in options:
            shellwidget.shell.set_font(font_o)
            # The completion popup shares the console font.
            completion_size = CONF.get('shell_appearance',
                                       'completion/size')
            comp_widget = shellwidget.shell.completion_widget
            comp_widget.setup_appearance(completion_size, font_o)
        if showtime_n in options:
            shellwidget.set_elapsed_time_visible(showtime_o)
        if icontext_n in options:
            shellwidget.set_icontext_visible(icontext_o)
        if calltips_n in options:
            shellwidget.shell.set_calltips(calltips_o)
        if inspector_n in options:
            # Only Python shells can talk to the object inspector.
            if isinstance(shellwidget, ExternalPythonShell):
                shellwidget.shell.set_inspector_enabled(inspector_o)
        if wrap_n in options:
            shellwidget.shell.toggle_wrap_mode(wrap_o)
        if compauto_n in options:
            shellwidget.shell.set_codecompletion_auto(compauto_o)
        if case_comp_n in options:
            shellwidget.shell.set_codecompletion_case(case_comp_o)
        if compenter_n in options:
            shellwidget.shell.set_codecompletion_enter(compenter_o)
        if mlc_n in options:
            shellwidget.shell.setMaximumBlockCount(mlc_o)
#------ SpyderPluginMixin API ---------------------------------------------
def toggle_view(self, checked):
    """Toggle view"""
    if not checked:
        self.dockwidget.hide()
        return
    self.dockwidget.show()
    self.dockwidget.raise_()
    # Start a console in case there are none shown
    from spyderlib.widgets.externalshell import pythonshell
    has_python_console = any(
        isinstance(sw, pythonshell.ExternalPythonShell)
        and not sw.is_ipykernel
        for sw in self.shellwidgets)
    if not has_python_console:
        self.open_interpreter()
#------ Public API ---------------------------------------------------------
def open_interpreter(self, wdir=None):
    """Open interpreter"""
    working_dir = getcwd() if wdir is None else wdir
    if not self.main.light:
        self.visibility_changed(True)
    self.start(fname=None, wdir=to_text_string(working_dir), args='',
               interact=True, debug=False, python=True)
def start_ipykernel(self, client, wdir=None, give_focus=True):
    """Start new IPython kernel"""
    # The monitor is what lets a frontend be attached automatically
    # (see the autorefresh note in `start`); warn if it is disabled.
    if not self.get_option('monitor/enabled'):
        QMessageBox.warning(self, _('Open an IPython console'),
            _("The console monitor was disabled: the IPython kernel will "
              "be started as expected, but an IPython console will have "
              "to be connected manually to the kernel."), QMessageBox.Ok)
    if wdir is None:
        wdir = getcwd()
    self.main.ipyconsole.visibility_changed(True)
    self.start(fname=None, wdir=to_text_string(wdir), args='',
               interact=True, debug=False, python=True, ipykernel=True,
               ipyclient=client, give_ipyclient_focus=give_focus)
def open_terminal(self, wdir=None):
    """Open terminal"""
    target_dir = wdir if wdir is not None else getcwd()
    self.start(fname=None, wdir=to_text_string(target_dir), args='',
               interact=True, debug=False, python=False)
def run_script(self):
    """Run a Python script"""
    # Temporarily stop redirecting stdio to internal consoles so the
    # native file dialog behaves correctly, then restore redirection.
    self.emit(SIGNAL('redirect_stdio(bool)'), False)
    filename, _selfilter = getopenfilename(self, _("Run Python script"),
            getcwd(), _("Python scripts")+" (*.py ; *.pyw ; *.ipy)")
    self.emit(SIGNAL('redirect_stdio(bool)'), True)
    if filename:
        # Run non-interactively in a new console tab (`python` presumably
        # defaults to True in `start` -- confirm against its signature).
        self.start(fname=filename, wdir=None, args='',
                   interact=False, debug=False)
def set_umr_namelist(self):
    """Set UMR excluded modules name list"""
    # Ask the user for a comma-separated list of modules that the User
    # Module Reloader should never reload.
    arguments, valid = QInputDialog.getText(self, _('UMR'),
                        _('UMR excluded modules:\n'
                          '(example: guidata, guiqwt)'),
                        QLineEdit.Normal,
                        ", ".join(self.get_option('umr/namelist')))
    if valid:
        arguments = to_text_string(arguments)
        if arguments:
            namelist = arguments.replace(' ', '').split(',')
            # Keep only module names that are actually importable on
            # this machine; report the rest to the user.
            fixed_namelist = [module_name for module_name in namelist
                              if programs.is_module_installed(module_name)]
            invalid = ", ".join(set(namelist)-set(fixed_namelist))
            if invalid:
                QMessageBox.warning(self, _('UMR'),
                                    _("The following modules are not "
                                      "installed on your machine:\n%s"
                                      ) % invalid, QMessageBox.Ok)
            QMessageBox.information(self, _('UMR'),
                                    _("Please note that these changes will "
                                      "be applied only to new Python/IPython "
                                      "consoles"), QMessageBox.Ok)
        else:
            # Empty input clears the exclusion list.
            fixed_namelist = []
        self.set_option('umr/namelist', fixed_namelist)
def go_to_error(self, text):
    """Go to error if relevant"""
    match = get_error_match(to_text_string(text))
    if match is None:
        return
    fname, lnb = match.groups()
    self.emit(SIGNAL("edit_goto(QString,int,QString)"),
              osp.abspath(fname), int(lnb), '')
#----Drag and drop
def dragEnterEvent(self, event):
    """Reimplement Qt method
    Inform Qt about the types of data that the widget accepts"""
    source = event.mimeData()
    if source.hasUrls():
        if mimedata2url(source):
            pathlist = mimedata2url(source)
            shellwidget = self.tabwidget.currentWidget()
            if all([is_python_script(to_text_string(qstr))
                    for qstr in pathlist]):
                # Python scripts are always accepted (they will be run).
                event.acceptProposedAction()
            elif shellwidget is None or not shellwidget.is_running():
                # Arbitrary paths need a running console to receive them.
                event.ignore()
            else:
                event.acceptProposedAction()
        else:
            event.ignore()
    elif source.hasText():
        # Plain text is always accepted (inserted into the shell).
        event.acceptProposedAction()
def dropEvent(self, event):
    """Reimplement Qt method
    Unpack dropped data and handle it"""
    source = event.mimeData()
    shellwidget = self.tabwidget.currentWidget()
    if source.hasText():
        # Dropped text: run it when it is a path to a Python script,
        # otherwise insert it at the current shell's cursor.
        qstr = source.text()
        if is_python_script(to_text_string(qstr)):
            self.start(qstr)
        elif shellwidget:
            shellwidget.shell.insert_text(qstr)
    elif source.hasUrls():
        # Dropped URLs: run them all when every one is a Python script,
        # otherwise hand the raw path list to the current shell.
        pathlist = mimedata2url(source)
        if all([is_python_script(to_text_string(qstr))
                for qstr in pathlist]):
            for fname in pathlist:
                self.start(fname)
        elif shellwidget:
            shellwidget.shell.drop_pathlist(pathlist)
    event.acceptProposedAction()
| gpl-3.0 |
mdeff/ntds_2017 | projects/reports/speech_recognition/main_pipeline.py | 1 | 13366 |
import os
from os.path import isdir, join
from pathlib import Path
import pandas as pd
from tqdm import tqdm
# Math
import numpy as np
import scipy.stats
from scipy.fftpack import fft
from scipy import signal
from scipy.io import wavfile
import librosa
import librosa.display
from scipy import sparse, stats, spatial
import scipy.sparse.linalg
# Machine learning
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
import IPython.display as ipd
# Cutting
from cut_audio import *
def main_train_audio_extraction():
    '''
    - Function that allow the extraction of all the audio files.
    - Process :
        1. Indexing the path, the class and the speaker of all the audio files.
        2. Audio Extraction :
            2.1. Loading all the audio files into memory
            2.2. Detecting the position of the word inside each audio files and cutting them
            2.3. Saving into a Pickled DataFrame all the audio and their cutted version
    '''
    train_audio_path = join('..', 'Project', 'data', 'train', 'audio')
    # Listing the directories of each word class
    dirs = [f for f in os.listdir(train_audio_path) if isdir(join(train_audio_path, f))]
    dirs.sort()
    path = []
    word = []
    speaker = []
    iteration = []
    # Loading the information of the audio files.
    # Directories starting with '_' (e.g. background noise) are skipped.
    for direct in dirs:
        if not direct.startswith('_'):
            list_files = os.listdir(join(train_audio_path, direct))
            wave_selected = list([f for f in list_files if f.endswith('.wav')])
            # Extraction of file informations for dataframe: the speaker id
            # is the part before the first '_' of the file name, the
            # iteration the part after the last '_'.
            word.extend(list(np.repeat(direct, len(wave_selected), axis=0)))
            speaker.extend([wave_selected[f].split('.')[0].split('_')[0] for f in range(len(wave_selected))])
            iteration.extend([wave_selected[f].split('.')[0].split('_')[-1] for f in range(len(wave_selected))])
            path.extend([train_audio_path + '/' + direct + '/' + wave_selected[f] for f in range(len(wave_selected))])
    # Saving those informations into a pandas DataFrame
    features_og = pd.DataFrame({('info', 'word', ''): word,
                                ('info', 'speaker', ''): speaker,
                                ('info', 'iteration', ''): iteration,
                                ('info', 'path', ''): path})
    # NOTE(review): index_og is built but never used below.
    index_og = [('info', 'word', ''), ('info', 'speaker', ''), ('info', 'iteration', '')]
    print('Number of signals : ' + str(len(features_og)))
    # Load and cut the audio files.
    raw_audio_df = load_audio_file(features_og)
    # Save the raw audio Dataframe into a set of pickles, 6000 rows per
    # file, so no single pickle gets too large.
    i = 0
    k = 0
    while True:
        i_next = i + 6000
        k += 1
        if i_next < len(raw_audio_df):
            raw_audio_df.iloc[i:i_next].to_pickle(('../Project/data/raw_audio_all_'+ str(k)+'.pickle'))
        else:
            # Last (possibly shorter) chunk.
            raw_audio_df.iloc[i:len(raw_audio_df)].to_pickle(('../Project/data/raw_audio_all_'+ str(k)+'.pickle'))
            break
        i = i_next
def main_train_audio_features():
    '''
    - Function that allow the computation of all the features.
    - Process :
        1. Load the raw audio files.
        2. Features Extraction :
            2.1. Loading the Previously pickled raw audio file.
            2.2. Computing the MFCC of all the cutted version of the audio files.
            2.3. Saving them in a Pickled Pandas DataFrame
    '''
    # Reload and concatenate the raw-audio pickles produced by
    # main_train_audio_extraction (files 1..11).
    audio_loaded_df = pd.read_pickle(('../Project/data/raw_audio_all_'+ str(1)+'.pickle'))
    for i in range(2, 12):
        audio_loaded_df = audio_loaded_df.append(pd.read_pickle(('../Project/data/raw_audio_all_'+ str(i)+'.pickle')))
    # Optimal Parameters :
    N_MFCC = 10
    N_FFT = int(2048/2)
    NUM_MFCCS_VEC = 20
    # NOTE(review): row 2113 is dropped by hard-coded index -- presumably a
    # corrupt recording; confirm before reusing on a different dataset.
    audio_loaded_df = audio_loaded_df.drop(2113).reset_index(drop=True)
    features_og = compute_mfcc_raw(audio_loaded_df, N_MFCC, N_FFT, NUM_MFCCS_VEC, cut=True)
    # Save features DataFrame as pickle (audio columns stripped to keep
    # the file small).
    features_og.drop(axis=1, columns=('audio')).to_pickle('./Features Data/cut_mfccs_all_raw_10_1028_20.pickle')
    features_og.head(2)
def load_audio_file(features_og):
    """Load every wav file listed in ``features_og`` and cut out the word.

    Parameters
    ----------
    features_og : DataFrame with an ('info', 'path') column giving the wav
        file location of each row.

    Returns
    -------
    ``features_og`` merged with three new ('audio', ...) columns: the raw
    signal, its sampling rate and the word-cut signal.  Rows whose signal
    is all-zero (silent/corrupt files) are excluded from the result.
    """
    print("----- Start Importation -----")
    dropped = []
    audio_df = pd.DataFrame(
        columns=pd.MultiIndex.from_tuples(
            [('audio', 'raw', ''), ('audio', 'sr', ''), ('audio', 'cut', '')]),
        index=features_og.index)
    for w in tqdm(range(len(features_og)), total=len(features_og), unit='waves'):
        audio, sampling_rate = librosa.load(features_og[('info', 'path')].iloc[w],
                                            sr=None, mono=True)
        # A signal whose maximum is 0.0 carries no usable audio.
        if np.max(audio) != 0.0:
            audio_df.loc[w, ('audio', 'raw', '')] = audio
            audio_df.loc[w, ('audio', 'sr', '')] = sampling_rate
            audio_df.loc[w, ('audio', 'cut', '')] = cut_signal(audio)
        else:
            dropped.append(w)
    # BUG FIX: DataFrame.drop returns a copy -- the previous per-row
    # `audio_df.drop(w)` / `features_og.drop(w)` calls discarded their
    # result, so "dropped" rows were silently kept as all-NaN entries.
    # Drop them once, after the loop.
    if dropped:
        audio_df = audio_df.drop(dropped)
        features_og = features_og.drop(dropped)
    audio_df = features_og.merge(audio_df, left_index=True, right_index=True)
    print("----- End Importation -----")
    print("Number of dropped signals :", len(dropped))
    return audio_df
def compute_mfcc_raw(features_og, N_MFCC, N_FFT, NUM_MFCCS_VEC, cut=True):
    '''
    Compute a flattened MFCC feature vector for every audio row.

    Parameters
    ----------
    features_og : DataFrame with ('audio', 'raw'/'cut'/'sr') columns.
    N_MFCC : number of MFCC coefficients per frame.
    N_FFT : FFT window length passed to librosa.
    NUM_MFCCS_VEC : target number of frames kept per signal; the hop
        length is derived from the signal length accordingly.
    cut : if True, use the word-cut version of the signal instead of
        the full raw recording.

    Returns
    -------
    ``features_og`` with N_MFCC * NUM_MFCCS_VEC new
    ('mfcc', 'raw_mfcc', j) columns merged in.
    '''
    stat_name = ['raw_mfcc']
    col_names = [('mfcc', stat_name[i], j) for i in range(len(stat_name)) for j in range(N_MFCC*NUM_MFCCS_VEC)]
    features_mfcc = pd.DataFrame(columns=pd.MultiIndex.from_tuples(col_names), index=features_og.index)
    # sorting the columns in order to improve index performances (see lexsort errors)
    features_mfcc.sort_index(axis=1, inplace=True, sort_remaining=True)
    # MFCC FEATURES :
    for w in tqdm(range(len(features_og)), total=len(features_og), unit='waves'):
        # Handling the cut version of the signal :
        if cut == True:
            audio = features_og.loc[w, ('audio', 'cut', '')]
        else:
            audio = features_og.loc[w, ('audio', 'raw', '')]
        sampling_rate = features_og.loc[w, ('audio', 'sr', '')]
        # Computing the MFCC for each signal :
        mfcc = librosa.feature.mfcc(y=audio, sr=sampling_rate, n_mfcc=N_MFCC, n_fft=N_FFT, hop_length=int(np.floor(len(audio)/NUM_MFCCS_VEC)))
        # The last frame is discarded before flattening; this assumes
        # librosa yields exactly NUM_MFCCS_VEC+1 frames here -- TODO
        # confirm for signals shorter than one hop.
        features_mfcc.loc[w, ('mfcc', 'raw_mfcc')] = mfcc[:, :-1].reshape(-1,)
    features_og = features_og.merge(features_mfcc, left_index=True, right_index=True)
    return features_og
def fit_and_test(clf, train_x, train_y, test_x, test_y):
    """Fit ``clf`` on the training split and return its test predictions.

    ``test_y`` is accepted for call-site symmetry but is not used here;
    accuracy is computed by the caller.
    """
    clf.fit(train_x, train_y)
    return clf.predict(test_x)
def adapt_labels(x_hat, class_names):
    """Re-map labels onto the 20 core command/digit words.

    Labels whose class name is one of the core words keep their own
    (re-indexed) label; every other class collapses into a single
    trailing "unknown" label.

    Parameters
    ----------
    x_hat : array of integer labels (indices into ``class_names``).
    class_names : array of class-name strings.

    Returns
    -------
    (adapted_labels, class_names_main) -- the re-mapped label array and
    the list of kept class names with "unknown" appended.
    """
    core_words = {"yes", "no", "up", "down", "left", "right", "on", "off",
                  "stop", "go", "zero", "one", "two", "three", "four",
                  "five", "six", "seven", "eight", "nine"}
    kept = [i for i, name in enumerate(class_names) if name in core_words]
    remap = {old: new for new, old in enumerate(kept)}
    class_names_main = [class_names[i] for i in kept]
    class_names_main.append("unknown")
    unknown_label = len(class_names_main) - 1
    adapted = np.array([remap[int(lbl)] if int(lbl) in remap else unknown_label
                        for lbl in x_hat])
    return adapted, class_names_main
def solve(Y_compr, M, L, alpha, beta):
    """Solve the semi-supervised label-propagation problem row by row.

    For each class c (a row of ``Y_compr``) this solves the normal
    equations of

        min_x ||diag(m_c) x - y_c||^2 + alpha * x' L x + beta * ||x||^2

    i.e. ``(diag(m_c) + alpha*L + beta*I) x = y_c``.

    Parameters
    ----------
    Y_compr : (k, n) array -- one-hot labels, zero columns for unlabeled points.
    M : (k, n) array -- mask, 1 where the label is known.
    L : (n, n) array or sparse matrix -- graph Laplacian.
    alpha : float -- smoothness trade-off.
    beta : float -- l2 regularization weight.

    Returns
    -------
    (k, n) array of estimated soft labels.
    """
    n = Y_compr.shape[1]
    X = np.ones(Y_compr.shape)
    # BUG FIX: the l2 term must contribute beta*I to the system matrix;
    # previously the scalar `beta` was broadcast to *every* entry of the
    # matrix instead of only the diagonal.
    reg = beta * np.eye(n)
    for i in range(Y_compr.shape[0]):
        Mask = np.diag(M[i, :])
        X[i, :] = np.linalg.solve(Mask + alpha * L + reg, Y_compr[i, :])
    return X
# pipeline function for semisupervised learning using graphs
def semisup_test_all_dataset(features_og, y, batch_size, NEIGHBORS, alpha, beta, iter_max, class_names):
"""Test semisupervised graph learning algorithm for entire dataset.
- features_og : original copy of all MFCCs
- batch_size : number of samples to be predict per iteration in main loop
- NEIGHBORS : number of neirest neighbors in k-NN sparsification
- alpha : hyper-parameter which controls the trade-off between the data fidelity term and the smoothness prio
- beta : hyper-paramter which controls the importance of the l2 regularization for semi-supervised learning
"""
accuracy_mat = np.zeros((2,iter_max))
for itr in tqdm(range(iter_max)):
# Specify the number of datapoints that should be sampled in each class to build training and validation set
train_size = 160
valid_size = 1553
train_x = np.array([])
train_y = np.array([])
valid_x = np.array([])
valid_y = np.array([])
for i in range(len(class_names)):
class_index = np.where(y == (i+1))[0]
random_index = np.random.choice(range(len(class_index)), size=train_size+valid_size, replace=False)
train_x_class = class_index[random_index[:train_size]]
train_y_class = y[train_x_class]
train_x = np.append(train_x, train_x_class).astype(int)
train_y = np.append(train_y, train_y_class).astype(int)
valid_x_class = class_index[random_index[train_size:train_size+valid_size]]
valid_y_class = y[valid_x_class]
valid_x = np.append(valid_x, valid_x_class).astype(int)
valid_y = np.append(valid_y, valid_y_class).astype(int)
# Choose datapoints from validation set at random to form a batch
potential_elements = np.array(list(enumerate(np.array(valid_x))))
indices = np.random.choice(potential_elements[:,0].reshape(-1,), batch_size, replace=False)
# The batch index_variable contains the indices of the batch datapoints inside the complete dataset
batch_index = potential_elements[:,1].reshape(-1,)[indices]
# Build data matrix and normalize features
X = pd.DataFrame(features_og['mfcc'], np.append(train_x, batch_index))
X -= X.mean(axis=0)
X /= X.std(axis=0)
# Compute distances between all datapoints
distances = spatial.distance.squareform(spatial.distance.pdist(X,'cosine'))
n=distances.shape[0]
# Build weight matrix
kernel_width = distances.mean()
W = np.exp(np.divide(-np.square(distances),kernel_width**2))
# Make sure the diagonal is 0 for the weight matrix
np.fill_diagonal(W,0)
# compute laplacian
degrees = np.sum(W,axis=0)
laplacian = np.diag(degrees**-0.5) @ (np.diag(degrees) - W) @ np.diag(degrees**-0.5)
laplacian = sparse.csr_matrix(laplacian)
# Spectral Clustering --------------------------------------------------------------------------------
eigenvalues, eigenvectors = sparse.linalg.eigsh(A=laplacian,k=25,which='SM')
# Splitt Eigenvectors into train and validation parts
train_features = eigenvectors[:len(train_x),:]
valid_features = eigenvectors[len(train_x):,:]
clf = QuadraticDiscriminantAnalysis()
predict_y = fit_and_test(clf, train_features, train_y, valid_features, np.array(y[batch_index]))
valid_y_adapted, class_names_main = adapt_labels(np.array(y[batch_index]),class_names)
predict_y_adapted, class_names_main = adapt_labels(predict_y,class_names)
accuracy_mat[0,itr] = np.sum(valid_y_adapted==predict_y_adapted)/len(valid_y_adapted)
# Semi-Supervised Learning-----------------------------------------------------------------------------
# Sparsify using k- nearest neighbours and make sure it stays symmetric
# Make sure
for i in range(W.shape[0]):
idx = W[i,:].argsort()[:-NEIGHBORS]
W[i,idx] = 0
W[idx,i] = 0
# Build normalized Laplacian Matrix
D = np.sum(W,axis=0)
L = np.diag(D**-0.5) @ (np.diag(D) - W) @ np.diag(D**-0.5)
L = sparse.csr_matrix(L)
# Build one-hot encoded class matrix
Y_t = np.eye(len(class_names))[train_y - 1].T
# Create Mask Matrix
M = np.zeros((len(class_names), len(train_y) + batch_size))
M[:len(train_y),:len(train_y)] = 1
# Create extened label matrix and vector
Y = np.concatenate((Y_t, np.zeros((len(class_names), batch_size))), axis=1)
# Solve for the matrix X
Y_hat = solve(Y, M, L,alpha = 1e-3, beta = 1e-7)
# Go from matrix X to estimated label vector x_hat
y_predict = np.argmax(Y_hat,axis = 0)+np.ones(Y_hat[0,:].shape)
# Adapt the labels, whee all words of the category "unknown" are unified
y_predict_adapted, class_names_main = adapt_labels(y_predict,class_names)
y_adapted, class_names_main = adapt_labels(np.array(y[batch_index]),class_names)
# Compute accuracy in predicting unknown labels
accuracy_mat[1,itr] = np.sum(y_predict_adapted[-batch_size:]==y_adapted)/batch_size
return accuracy_mat
| mit |
xavierwu/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
    """
    choice(a, size=None, replace=True, p=None)

    Generates a random sample from a given 1-D array

    .. versionadded:: 1.7.0

    Parameters
    -----------
    a : 1-D array-like or int
        If an ndarray, a random sample is generated from its elements.
        If an int, the random sample is generated as if a was np.arange(n)

    size : int or tuple of ints, optional
        Output shape. Default is None, in which case a single value is
        returned.

    replace : boolean, optional
        Whether the sample is with or without replacement.

    p : 1-D array-like, optional
        The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribtion over all
        entries in a.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    --------
    samples : 1-D ndarray, shape (size,)
        The generated random samples

    Raises
    -------
    ValueError
        If a is an int and less than zero, if a or p are not 1-dimensional,
        if a is an array-like of size 0, if p is not a vector of
        probabilities, if a and p have different lengths, or if
        replace=False and the sample size is greater than the population
        size

    See Also
    ---------
    randint, shuffle, permutation

    Examples
    ---------
    Generate a uniform random sample from np.arange(5) of size 3:

    >>> np.random.choice(5, 3)  # doctest: +SKIP
    array([0, 3, 4])
    >>> #This is equivalent to np.random.randint(0,5,3)

    Generate a non-uniform random sample from np.arange(5) of size 3:

    >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])  # doctest: +SKIP
    array([3, 3, 0])

    Generate a uniform random sample from np.arange(5) of size 3 without
    replacement:

    >>> np.random.choice(5, 3, replace=False)  # doctest: +SKIP
    array([3,1,0])
    >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]

    Generate a non-uniform random sample from np.arange(5) of size
    3 without replacement:

    >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
    ... # doctest: +SKIP
    array([2, 3, 0])

    Any of the above can be repeated with an arbitrary array-like
    instead of just integers. For instance:

    >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
    >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
    ... # doctest: +SKIP
    array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
          dtype='|S11')
    """
    random_state = check_random_state(random_state)

    # Format and Verify input
    a = np.array(a, copy=False)
    if a.ndim == 0:
        try:
            # __index__ must return an integer by python rules.
            pop_size = operator.index(a.item())
        except TypeError:
            raise ValueError("a must be 1-dimensional or an integer")
        if pop_size <= 0:
            raise ValueError("a must be greater than 0")
    elif a.ndim != 1:
        raise ValueError("a must be 1-dimensional")
    else:
        pop_size = a.shape[0]
        # BUGFIX: was ``pop_size is 0`` -- identity comparison on an int
        # relies on CPython's small-integer caching and is a SyntaxWarning
        # on Python >= 3.8; equality is the correct test.
        if pop_size == 0:
            raise ValueError("a must be non-empty")

    # BUGFIX: was ``None != p`` -- when p is an ndarray, ``!=`` is
    # elementwise under modern NumPy and makes the ``if`` ambiguous.
    # Identity comparison with None is the correct test (same fix is
    # applied at the two other sites below).
    if p is not None:
        p = np.array(p, dtype=np.double, ndmin=1, copy=False)
        if p.ndim != 1:
            raise ValueError("p must be 1-dimensional")
        if p.size != pop_size:
            raise ValueError("a and p must have same size")
        if np.any(p < 0):
            raise ValueError("probabilities are not non-negative")
        if not np.allclose(p.sum(), 1):
            raise ValueError("probabilities do not sum to 1")

    shape = size
    if shape is not None:
        # Total number of draws implied by an arbitrary output shape.
        size = np.prod(shape, dtype=np.intp)
    else:
        size = 1

    # Actual sampling
    if replace:
        if p is not None:
            # Inverse-CDF sampling: place uniform draws in the cumulative
            # distribution with a binary search.
            cdf = p.cumsum()
            cdf /= cdf[-1]
            uniform_samples = random_state.random_sample(shape)
            idx = cdf.searchsorted(uniform_samples, side='right')
            # searchsorted returns a scalar
            idx = np.array(idx, copy=False)
        else:
            idx = random_state.randint(0, pop_size, size=shape)
    else:
        if size > pop_size:
            raise ValueError("Cannot take a larger sample than "
                             "population when 'replace=False'")

        if p is not None:
            if np.sum(p > 0) < size:
                raise ValueError("Fewer non-zero entries in p than size")
            n_uniq = 0
            p = p.copy()
            # BUGFIX: ``np.int`` alias was removed in NumPy 1.24; plain
            # ``int`` is what the alias always meant.
            found = np.zeros(shape, dtype=int)
            flat_found = found.ravel()
            # Rejection loop: redraw with already-selected indices zeroed
            # out until ``size`` unique indices have been accumulated.
            while n_uniq < size:
                x = random_state.rand(size - n_uniq)
                if n_uniq > 0:
                    p[flat_found[0:n_uniq]] = 0
                cdf = np.cumsum(p)
                cdf /= cdf[-1]
                new = cdf.searchsorted(x, side='right')
                _, unique_indices = np.unique(new, return_index=True)
                unique_indices.sort()
                new = new.take(unique_indices)
                flat_found[n_uniq:n_uniq + new.size] = new
                n_uniq += new.size
            idx = found
        else:
            idx = random_state.permutation(pop_size)[:size]

    if shape is not None:
        idx.shape = shape

    if shape is None and isinstance(idx, np.ndarray):
        # In most cases a scalar will have been made an array
        idx = idx.item(0)

    # Use samples as indices for a if a is array-like
    if a.ndim == 0:
        return idx

    if shape is not None and idx.ndim == 0:
        # If size == () then the user requested a 0-d array as opposed to
        # a scalar object when size is None. However a[idx] is always a
        # scalar and not an array. So this makes sure the result is an
        # array, taking into account that np.array(item) may not work
        # for object arrays.
        res = np.empty((), dtype=a.dtype)
        res[()] = a[idx]
        return res

    return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
                      random_state=None):
    """Generate a sparse random matrix given column class distributions

    Parameters
    ----------
    n_samples : int,
        Number of samples to draw in each column.

    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.

    class_probability : list of size n_outputs of arrays of size (n_classes,)
        Optional (default=None). Class distribution of each column. If None the
        uniform distribution is assumed.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    random_matrix : sparse csc matrix of size (n_samples, n_outputs)
    """
    data = array.array('i')
    indices = array.array('i')
    indptr = array.array('i', [0])

    for j in range(len(classes)):
        classes[j] = np.asarray(classes[j])
        if classes[j].dtype.kind != 'i':
            raise ValueError("class dtype %s is not supported" %
                             classes[j].dtype)
        classes[j] = astype(classes[j], np.int64, copy=False)

        # use uniform distribution if no class_probability is given
        if class_probability is None:
            class_prob_j = np.empty(shape=classes[j].shape[0])
            class_prob_j.fill(1 / classes[j].shape[0])
        else:
            class_prob_j = np.asarray(class_probability[j])

        # BUGFIX: was an exact float comparison (``!= 1.0``), which rejects
        # perfectly valid distributions such as [0.1] * 10 because of
        # floating-point rounding; compare with a tolerance instead.
        if not np.isclose(np.sum(class_prob_j), 1.0):
            raise ValueError("Probability array at index {0} does not sum to "
                             "one".format(j))

        if class_prob_j.shape[0] != classes[j].shape[0]:
            raise ValueError("classes[{0}] (length {1}) and "
                             "class_probability[{0}] (length {2}) have "
                             "different length.".format(j,
                                                        classes[j].shape[0],
                                                        class_prob_j.shape[0]))

        # If 0 is not present in the classes insert it with a probability 0.0
        # (the CSC representation stores only the nonzero entries).
        if 0 not in classes[j]:
            classes[j] = np.insert(classes[j], 0, 0)
            class_prob_j = np.insert(class_prob_j, 0, 0.0)

        # If there are nonzero classes choose randomly using class_probability
        rng = check_random_state(random_state)
        if classes[j].shape[0] > 1:
            # Expected fraction of nonzero entries in this column.
            p_nonzero = 1 - class_prob_j[classes[j] == 0]
            nnz = int(n_samples * p_nonzero)
            ind_sample = sample_without_replacement(n_population=n_samples,
                                                    n_samples=nnz,
                                                    random_state=random_state)
            indices.extend(ind_sample)

            # Normalize probabilites for the nonzero elements
            classes_j_nonzero = classes[j] != 0
            class_probability_nz = class_prob_j[classes_j_nonzero]
            class_probability_nz_norm = (class_probability_nz /
                                         np.sum(class_probability_nz))
            # Inverse-CDF draw of the class value for each nonzero entry.
            classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
                                          rng.rand(nnz))
            data.extend(classes[j][classes_j_nonzero][classes_ind])
        indptr.append(len(indices))

    return sp.csc_matrix((data, indices, indptr),
                         (n_samples, len(classes)),
                         dtype=int)
| bsd-3-clause |
mjescobar/RF_Estimation | Clustering/helpers/processClusters/processClustersBin2.py | 2 | 4923 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# processClustersBin2.py
#
# Copyright 2015 Monica Otero <monicaot2011@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..','LIB'))
import rfestimationLib as rfe #Some custom functions
import argparse #argument parsing
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from numpy import loadtxt
from numpy import shape
from numpy import histogram
from numpy import amax
from numpy import amin
from numpy import append
from numpy import zeros
from numpy import empty
from numpy import arange
#Input file format
# 0-19 Timestamps
# aRadius
# bRadius
# angle
# xCoordinate
# yCoordinate
# area
# clusterId
# peakTime
# On/Off
clustersColours = ['green', 'red', 'blue', 'yellow', 'black','indigo', \
'#ff006f','#00e8ff','#fcfa00', '#ff0000', '#820c2c', \
'#ff006f', '#af00ff','#0200ff','#008dff','#00e8ff', \
'#0c820e','#28ea04','#ea8404','#c8628f','#6283ff', \
'#5b6756','#0c8248','k','#820cff','#932c11', \
'#002c11','#829ca7']
def loadClusterFile(sourceFile):
	"""Read a comma-separated units file into a numpy array."""
	return loadtxt(sourceFile, delimiter=',')
def graficaHistograma(areaTotal,areaInteres,outputFolder,titulo,clusterId,binsCalculados):
	"""Overlay the area histogram of one subset (slow or fast units) on the
	histogram of all units, and save the figure as a PNG in outputFolder."""
	# Both histograms share bin edges so the overlay is comparable.
	comunes = dict(bins=binsCalculados, histtype='stepfilled', normed=0)
	plt.hist(areaTotal, color='grey', alpha=0.2, label='Total', **comunes)
	plt.hist(areaInteres, color=clustersColours[clusterId], alpha=0.4,
		label=titulo, **comunes)
	plt.ylabel('Units')
	plt.xlabel('Area')
	plt.title('Total/'+titulo)
	plt.legend()
	nombreSalida = outputFolder+titulo+'_cluster_'+str(clusterId)+'.png'
	plt.savefig(nombreSalida, bbox_inches='tight')
	# Close the figure so successive calls do not accumulate artists.
	plt.close()
	return 0
def main():
	"""Parse command-line arguments, split the units of each cluster into
	"slow" and "fast" groups by peak time, and save an area histogram for
	each non-empty group under the output folder."""
	parser = argparse.ArgumentParser(prog='processClustersBin2.py',
	    description='Plot units from clustering',
	    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
	parser.add_argument('--sourceFile',
	    help='Source file containing the units and its data',
	    type=str, required=True)
	parser.add_argument('--outputFolder',
	    help='Output folder',
	    type=str, required=True)
	args = parser.parse_args()

	#Source file of the units
	sourceFile = args.sourceFile
	if not os.path.exists(sourceFile):
		print ''
		print 'Source file does not exists ' + sourceFile
		print ''
		sys.exit()

	#Output folder for the graphics
	outputFolder = rfe.fixPath(args.outputFolder)
	if not os.path.exists(outputFolder):
		try:
			os.makedirs(outputFolder)
		except:
			print ''
			print 'Unable to create folder ' + outputFolder
			print ''
			sys.exit()

	Units = loadClusterFile(sourceFile)

	# Slow, Fast ?
	# Split into slow/fast based on a 2-bin histogram of peak times
	# (column 27): units at or below the first bin edge are "slow".
	peaks = Units[:,27]
	hist,edges = histogram(peaks,bins=2)
	slowMaximum = edges[1]
	fastMaximum = edges[2]
	#print 'slowMaximum',slowMaximum
	#print 'fastMaximum',fastMaximum

	# For each cluster id (column 26), walk over all units.
	numClusters = int(max(Units[:,26]))
	for clusterId in range(numClusters + 1):
		slowUnits = empty([1, 29])
		fastUnits = empty([1, 29])
		# Separate this cluster's units into slow and fast according to
		# the histogram edges computed above.
		for unitId in range(len(Units)):
			if Units[unitId,26] == clusterId:
				#print 'clusterId',clusterId
				#print 'peak',Units[unitId,27]
				if Units[unitId,27] <= slowMaximum:
					slowUnits = append(slowUnits,Units[unitId].reshape(1, 29), axis=0)
				else:
					fastUnits = append(fastUnits,Units[unitId].reshape(1, 29), axis=0)
		# Drop the first (uninitialised) row that came from numpy.empty.
		slowUnits = slowUnits[1:,:]
		fastUnits = fastUnits[1:,:]
		areaTotal = Units[:,25]
		binwidth = 30
		binsCalculados = arange(min(areaTotal), max(areaTotal) + binwidth, binwidth)
		# A group could be empty, so only plot non-empty ones.
		if shape(slowUnits)[0] > 0 :
			# Extract the feature of interest (area, column 25).
			areaSlows = slowUnits[:,25]
			# Plots
			graficaHistograma(areaTotal,areaSlows,outputFolder,'Slows',clusterId,binsCalculados)
		# Same for the fast group.
		if shape(fastUnits)[0] > 0 :
			# Extract the feature of interest (area, column 25).
			areaFasts = fastUnits[:,25]
			# Plots
			graficaHistograma(areaTotal,areaFasts,outputFolder,'Fasts',clusterId,binsCalculados)
	return 0
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
	main()
| gpl-2.0 |
kaichogami/sympy | sympy/interactive/printing.py | 19 | 16124 | """Tools for setting up printing in interactive sessions. """
from __future__ import print_function, division
import sys
from distutils.version import LooseVersion as V
from io import BytesIO
from sympy import latex as default_latex
from sympy import preview
from sympy.core.compatibility import integer_types
from sympy.utilities.misc import debug
def _init_python_printing(stringify_func):
    """Hook SymPy's stringifier into the plain Python REPL.

    Replaces ``sys.displayhook`` so every evaluated expression is rendered
    through *stringify_func* while preserving the ``_`` last-result
    convention of the interactive interpreter.
    """
    import sys
    from sympy.core.compatibility import builtins

    def _displayhook(arg):
        """Pretty-printing display hook, adapted from
        http://www.python.org/dev/peps/pep-0217/
        """
        if arg is None:
            return
        # Clear ``_`` first so printing cannot observe a stale value, then
        # store the new result, mirroring the builtin displayhook contract.
        builtins._ = None
        print(stringify_func(arg))
        builtins._ = arg

    sys.displayhook = _displayhook
def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor,
                           backcolor, fontsize, latex_mode, print_builtin,
                           latex_printer):
    """Setup printing in IPython interactive session.

    Registers 'text/plain', 'image/png' and 'text/latex' formatters for
    sympy objects on the shell ``ip``, according to the rendering mode
    selected by ``use_latex`` (see :func:`init_printing`).
    """
    try:
        from IPython.lib.latextools import latex_to_png
    except ImportError:
        # NOTE(review): if this import fails, _matplotlib_wrapper below
        # raises NameError when called; preserved from the original code.
        pass

    # LaTeX preamble for the external-compiler (dvipng) rendering path.
    preamble = "\\documentclass[%s]{article}\n" \
               "\\pagestyle{empty}\n" \
               "\\usepackage{amsmath,amsfonts}%s\\begin{document}"
    if euler:
        addpackages = '\\usepackage{euler}'
    else:
        addpackages = ''
    preamble = preamble % (fontsize, addpackages)

    imagesize = 'tight'
    offset = "0cm,0cm"
    resolution = 150
    dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % (
        imagesize, resolution, backcolor, forecolor, offset)
    dvioptions = dvi.split()
    debug("init_printing: DVIOPTIONS:", dvioptions)
    debug("init_printing: PREAMBLE:", preamble)

    # Fall back to sympy's default latex printer when none was supplied.
    latex = latex_printer or default_latex

    def _print_plain(arg, p, cycle):
        """caller for pretty, for use in IPython 0.11"""
        if _can_print_latex(arg):
            p.text(stringify_func(arg))
        else:
            # ``IPython`` is bound by the ``import IPython`` in the
            # enclosing scope below, which always runs before this hook
            # can be invoked.
            p.text(IPython.lib.pretty.pretty(arg))

    def _preview_wrapper(o):
        """Render *o* to PNG bytes via an external LaTeX distribution."""
        exprbuffer = BytesIO()
        try:
            preview(o, output='png', viewer='BytesIO',
                    outputbuffer=exprbuffer, preamble=preamble,
                    dvioptions=dvioptions)
        except Exception as e:
            # IPython swallows exceptions
            debug("png printing:", "_preview_wrapper exception raised:",
                  repr(e))
            raise
        return exprbuffer.getvalue()

    def _matplotlib_wrapper(o):
        """Render *o* to PNG bytes via matplotlib's mathtext, or None."""
        # mathtext does not understand certain latex flags, so we try to
        # replace them with suitable subs
        o = o.replace(r'\operatorname', '')
        o = o.replace(r'\overline', r'\bar')
        # mathtext can't render some LaTeX commands. For example, it can't
        # render any LaTeX environments such as array or matrix. So here we
        # ensure that if mathtext fails to render, we return None.
        try:
            return latex_to_png(o)
        except ValueError as e:
            debug('matplotlib exception caught:', repr(e))
            return None

    def _can_print_latex(o):
        """Return True if type o can be printed with LaTeX.

        If o is a container type, this is True if and only if every element of
        o can be printed with LaTeX.
        """
        from sympy import Basic
        from sympy.matrices import MatrixBase
        from sympy.physics.vector import Vector, Dyadic
        if isinstance(o, (list, tuple, set, frozenset)):
            return all(_can_print_latex(i) for i in o)
        elif isinstance(o, dict):
            return all(_can_print_latex(i) and _can_print_latex(o[i]) for i in o)
        elif isinstance(o, bool):
            # bool subclasses int but should display as Python's True/False.
            return False
        # TODO : Investigate if "elif hasattr(o, '_latex')" is more useful
        # to use here, than these explicit imports.
        elif isinstance(o, (Basic, MatrixBase, Vector, Dyadic)):
            return True
        elif isinstance(o, (float, integer_types)) and print_builtin:
            return True
        return False

    def _print_latex_png(o):
        """
        A function that returns a png rendered by an external latex
        distribution, falling back to matplotlib rendering
        """
        if _can_print_latex(o):
            s = latex(o, mode=latex_mode)
            try:
                return _preview_wrapper(s)
            except RuntimeError as e:
                debug('preview failed with:', repr(e),
                      ' Falling back to matplotlib backend')
                # mathtext only understands inline-mode latex.
                if latex_mode != 'inline':
                    s = latex(o, mode='inline')
                return _matplotlib_wrapper(s)

    def _print_latex_matplotlib(o):
        """
        A function that returns a png rendered by mathtext
        """
        if _can_print_latex(o):
            s = latex(o, mode='inline')
            return _matplotlib_wrapper(s)

    def _print_latex_text(o):
        """
        A function to generate the latex representation of sympy expressions.
        """
        if _can_print_latex(o):
            s = latex(o, mode='plain')
            s = s.replace(r'\dag', r'\dagger')
            s = s.strip('$')
            return '$$%s$$' % s

    def _result_display(self, arg):
        """IPython's pretty-printer display hook, for use in IPython 0.10

        This function was adapted from:
            ipython/IPython/hooks.py:155
        """
        if self.rc.pprint:
            out = stringify_func(arg)
            if '\n' in out:
                # BUGFIX: with ``from __future__ import print_function`` a
                # bare ``print`` is only a reference to the builtin and
                # prints nothing; the intent was a blank separator line.
                print()
            print(out)
        else:
            print(repr(arg))

    import IPython
    if V(IPython.__version__) >= '0.11':
        from sympy.core.basic import Basic
        from sympy.matrices.matrices import MatrixBase
        from sympy.physics.vector import Vector, Dyadic
        printable_types = [Basic, MatrixBase, float, tuple, list, set,
                           frozenset, dict, Vector, Dyadic] + list(integer_types)

        plaintext_formatter = ip.display_formatter.formatters['text/plain']

        for cls in printable_types:
            plaintext_formatter.for_type(cls, _print_plain)

        png_formatter = ip.display_formatter.formatters['image/png']
        if use_latex in (True, 'png'):
            debug("init_printing: using png formatter")
            for cls in printable_types:
                png_formatter.for_type(cls, _print_latex_png)
        elif use_latex == 'matplotlib':
            debug("init_printing: using matplotlib formatter")
            for cls in printable_types:
                png_formatter.for_type(cls, _print_latex_matplotlib)
        else:
            debug("init_printing: not using any png formatter")
            for cls in printable_types:
                # Better way to set this, but currently does not work in IPython
                #png_formatter.for_type(cls, None)
                if cls in png_formatter.type_printers:
                    png_formatter.type_printers.pop(cls)

        latex_formatter = ip.display_formatter.formatters['text/latex']
        if use_latex in (True, 'mathjax'):
            debug("init_printing: using mathjax formatter")
            for cls in printable_types:
                latex_formatter.for_type(cls, _print_latex_text)
        else:
            debug("init_printing: not using text/latex formatter")
            for cls in printable_types:
                # Better way to set this, but currently does not work in IPython
                #latex_formatter.for_type(cls, None)
                if cls in latex_formatter.type_printers:
                    latex_formatter.type_printers.pop(cls)
    else:
        # IPython 0.10 has no formatter registry; use the legacy hook.
        ip.set_hook('result_display', _result_display)
def _is_ipython(shell):
"""Is a shell instance an IPython shell?"""
# shortcut, so we don't import IPython if we don't have to
if 'IPython' not in sys.modules:
return False
try:
from IPython.core.interactiveshell import InteractiveShell
except ImportError:
# IPython < 0.11
try:
from IPython.iplib import InteractiveShell
except ImportError:
# Reaching this points means IPython has changed in a backward-incompatible way
# that we don't know about. Warn?
return False
return isinstance(shell, InteractiveShell)
def init_printing(pretty_print=True, order=None, use_unicode=None,
                  use_latex=None, wrap_line=None, num_columns=None,
                  no_global=False, ip=None, euler=False, forecolor='Black',
                  backcolor='Transparent', fontsize='10pt',
                  latex_mode='equation*', print_builtin=True,
                  str_printer=None, pretty_printer=None,
                  latex_printer=None):
    """
    Initializes pretty-printer depending on the environment.

    Parameters
    ==========

    pretty_print: boolean
        If True, use pretty_print to stringify or the provided pretty
        printer; if False, use sstrrepr to stringify or the provided string
        printer.
    order: string or None
        There are a few different settings for this parameter:
        lex (default), which is lexographic order;
        grlex, which is graded lexographic order;
        grevlex, which is reversed graded lexographic order;
        old, which is used for compatibility reasons and for long expressions;
        None, which sets it to lex.
    use_unicode: boolean or None
        If True, use unicode characters;
        if False, do not use unicode characters.
    use_latex: string, boolean, or None
        If True, use default latex rendering in GUI interfaces (png and
        mathjax);
        if False, do not use latex rendering;
        if 'png', enable latex rendering with an external latex compiler,
        falling back to matplotlib if external compilation fails;
        if 'matplotlib', enable latex rendering with matplotlib;
        if 'mathjax', enable latex text generation, for example MathJax
        rendering in IPython notebook or text rendering in LaTeX documents
    wrap_line: boolean
        If True, lines will wrap at the end; if False, they will not wrap
        but continue as one line. This is only relevant if `pretty_print` is
        True.
    num_columns: int or None
        If int, number of columns before wrapping is set to num_columns; if
        None, number of columns before wrapping is set to terminal width.
        This is only relevant if `pretty_print` is True.
    no_global: boolean
        If True, the settings become system wide;
        if False, use just for this console/session.
    ip: An interactive console
        This can either be an instance of IPython,
        or a class that derives from code.InteractiveConsole.
    euler: boolean, optional, default=False
        Loads the euler package in the LaTeX preamble for handwritten style
        fonts (http://www.ctan.org/pkg/euler).
    forecolor: string, optional, default='Black'
        DVI setting for foreground color.
    backcolor: string, optional, default='Transparent'
        DVI setting for background color.
    fontsize: string, optional, default='10pt'
        A font size to pass to the LaTeX documentclass function in the
        preamble.
    latex_mode: string, optional, default='equation*'
        The mode used in the LaTeX printer. Can be one of:
        {'inline'|'plain'|'equation'|'equation*'}.
    print_builtin: boolean, optional, default=True
        If true then floats and integers will be printed. If false the
        printer will only print SymPy types.
    str_printer: function, optional, default=None
        A custom string printer function. This should mimic
        sympy.printing.sstrrepr().
    pretty_printer: function, optional, default=None
        A custom pretty printer. This should mimic sympy.printing.pretty().
    latex_printer: function, optional, default=None
        A custom LaTeX printer. This should mimic sympy.printing.latex().

    Examples
    ========

    >>> from sympy.interactive import init_printing
    >>> from sympy import Symbol, sqrt
    >>> from sympy.abc import x, y
    >>> sqrt(5)
    sqrt(5)
    >>> init_printing(pretty_print=True) # doctest: +SKIP
    >>> sqrt(5) # doctest: +SKIP
      ___
    \/ 5
    >>> theta = Symbol('theta') # doctest: +SKIP
    >>> init_printing(use_unicode=True) # doctest: +SKIP
    >>> theta # doctest: +SKIP
    \u03b8
    >>> init_printing(use_unicode=False) # doctest: +SKIP
    >>> theta # doctest: +SKIP
    theta
    >>> init_printing(order='lex') # doctest: +SKIP
    >>> str(y + x + y**2 + x**2) # doctest: +SKIP
    x**2 + x + y**2 + y
    >>> init_printing(order='grlex') # doctest: +SKIP
    >>> str(y + x + y**2 + x**2) # doctest: +SKIP
    x**2 + x + y**2 + y
    >>> init_printing(order='grevlex') # doctest: +SKIP
    >>> str(y * x**2 + x * y**2) # doctest: +SKIP
    x**2*y + x*y**2
    >>> init_printing(order='old') # doctest: +SKIP
    >>> str(x**2 + y**2 + x + y) # doctest: +SKIP
    x**2 + x + y**2 + y
    >>> init_printing(num_columns=10) # doctest: +SKIP
    >>> x**2 + x + y**2 + y # doctest: +SKIP
    x + y +
    x**2 + y**2
    """
    import sys
    from sympy.printing.printer import Printer

    # Choose the function used to turn an expression into a string:
    # a user-supplied printer wins over the sympy default.
    if pretty_print:
        if pretty_printer is not None:
            stringify_func = pretty_printer
        else:
            from sympy.printing import pretty as stringify_func
    else:
        if str_printer is not None:
            stringify_func = str_printer
        else:
            from sympy.printing import sstrrepr as stringify_func

    # Even if ip is not passed, double check that not in IPython shell
    # (get_ipython is injected into builtins by IPython when running there).
    in_ipython = False
    if ip is None:
        try:
            ip = get_ipython()
        except NameError:
            pass
        else:
            in_ipython = (ip is not None)

    if ip and not in_ipython:
        in_ipython = _is_ipython(ip)

    if in_ipython and pretty_print:
        try:
            import IPython
            # IPython 1.0 deprecates the frontend module, so we import directly
            # from the terminal module to prevent a deprecation message from being
            # shown.
            if V(IPython.__version__) >= '1.0':
                from IPython.terminal.interactiveshell import TerminalInteractiveShell
            else:
                from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
            from code import InteractiveConsole
        except ImportError:
            pass
        else:
            # This will be True if we are in the qtconsole or notebook
            if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \
                    and 'ipython-console' not in ''.join(sys.argv):
                if use_unicode is None:
                    debug("init_printing: Setting use_unicode to True")
                    use_unicode = True
                if use_latex is None:
                    debug("init_printing: Setting use_latex to True")
                    use_latex = True

    if not no_global:
        Printer.set_global_settings(order=order, use_unicode=use_unicode,
                                    wrap_line=wrap_line, num_columns=num_columns)
    else:
        # Session-local settings: wrap the stringifier so the options are
        # passed per call instead of being stored globally on Printer.
        _stringify_func = stringify_func

        if pretty_print:
            stringify_func = lambda expr: \
                             _stringify_func(expr, order=order,
                                             use_unicode=use_unicode,
                                             wrap_line=wrap_line,
                                             num_columns=num_columns)
        else:
            stringify_func = lambda expr: _stringify_func(expr, order=order)

    if in_ipython:
        _init_ipython_printing(ip, stringify_func, use_latex, euler,
                               forecolor, backcolor, fontsize, latex_mode,
                               print_builtin, latex_printer)
    else:
        _init_python_printing(stringify_func)
| bsd-3-clause |
PythonCharmers/bokeh | bokeh/charts/builder/tests/test_heatmap_builder.py | 33 | 4145 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import HeatMap
from bokeh.models import FactorRange
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestHeatMap(unittest.TestCase):
    """Tests for the HeatMap chart builder with the supported input types."""

    def test_supported_input(self):
        """Build HeatMaps from OrderedDict, DataFrame, list and ndarray
        inputs and check the rendered data columns and factor ranges."""
        xyvalues = OrderedDict()
        xyvalues['apples'] = [4,5,8]
        xyvalues['bananas'] = [1,2,4]
        xyvalues['pears'] = [6,5,4]
        xyvaluesdf = pd.DataFrame(xyvalues, index=['2009', '2010', '2011'])
        # prepare some data to check tests results...
        heights = widths = [0.95] * 9
        colors = ['#e2e2e2', '#75968f', '#cc7878', '#ddb7b1', '#a5bab7', '#ddb7b1',
                  '#550b1d', '#e2e2e2', '#e2e2e2']
        catx = ['apples', 'bananas', 'pears', 'apples', 'bananas', 'pears',
                'apples', 'bananas', 'pears']
        rates = [4, 1, 6, 5, 2, 5, 8, 4, 4]
        caty = ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c']
        # i == 0 is the OrderedDict input, i == 1 the DataFrame input.
        for i, _xy in enumerate([xyvalues, xyvaluesdf]):
            hm = create_chart(HeatMap, _xy, palette=colors)
            builder = hm._builders[0]
            # TODO: Fix bug
            #self.assertEqual(sorted(hm.groups), sorted(list(xyvalues.keys())))
            assert_array_equal(builder._data['height'], heights)
            assert_array_equal(builder._data['width'], widths)
            assert_array_equal(builder._data['catx'], catx)
            assert_array_equal(builder._data['rate'], rates)
            assert_array_equal(builder._source._data, builder._data)
            assert_array_equal(hm.x_range.factors, builder._catsx)
            assert_array_equal(hm.y_range.factors, builder._catsy)
            self.assertIsInstance(hm.x_range, FactorRange)
            self.assertIsInstance(hm.y_range, FactorRange)
            # TODO: (bev) not sure what correct behaviour is
            #assert_array_equal(builder._data['color'], colors)
            if i == 0:  # OrderedDict input: default letter categories
                assert_array_equal(builder._data['caty'], caty)
            else:
                # DataFrame input: the index supplies the y categories.
                _caty = ['2009']*3 + ['2010']*3 + ['2011']*3
                assert_array_equal(builder._data['caty'], _caty)

        # Same checks for plain lists and ndarrays, where x categories
        # default to stringified column indices.
        catx = ['0', '1', '2', '0', '1', '2', '0', '1', '2']
        lvalues = [[4,5,8], [1,2,4], [6,5,4]]
        for _xy in [lvalues, np.array(lvalues)]:
            hm = create_chart(HeatMap, _xy, palette=colors)
            builder = hm._builders[0]
            # TODO: FIX bug
            #self.assertEqual(sorted(hm.groups), sorted(list(xyvalues.keys())))
            assert_array_equal(builder._data['height'], heights)
            assert_array_equal(builder._data['width'], widths)
            assert_array_equal(builder._data['catx'], catx)
            assert_array_equal(builder._data['rate'], rates)
            assert_array_equal(builder._source._data, builder._data)
            assert_array_equal(hm.x_range.factors, builder._catsx)
            assert_array_equal(hm.y_range.factors, builder._catsy)
            self.assertIsInstance(hm.x_range, FactorRange)
            self.assertIsInstance(hm.y_range, FactorRange)
            assert_array_equal(builder._data['caty'], caty)
            # TODO: (bev) not sure what correct behaviour is
            # assert_array_equal(builder._data['color'], colors)
| bsd-3-clause |
alexis-jacq/Mutual_Modelling | tools/teacher_learner_noise.py | 1 | 3735 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import random
from mutualModelling import model,agent0
import matplotlib.pyplot as plt
import copy
def create_teacher(name,all_names):
    """Build the teacher agent: it perceives and produces the letters
    a/b/c and can reward or punish; "b" is the rewarded letter.

    BUGFIX(review): the original referenced an undefined name ``agent``
    (a guaranteed NameError at call time); the module only imports
    ``model`` and ``agent0``, so ``agent0.Agent`` is used here --
    confirm against the mutualModelling package API.
    """
    percepts = ["a","b","c"]
    actions = ["a","b","c","reward","punish"]
    #rewards = [["success",1.,1.],["fail",1.,-1.]]
    rewards = [["a",1.,-1.],["b",1.,1.],['c',1.,-1.]]
    teacher = agent0.Agent(name,all_names,percepts,actions,rewards)
    return teacher
def create_learner(name,all_names):
    """Build the learner agent: it perceives reward/noise signals and can
    play a letter or imitate the teacher.

    BUGFIX(review): the original referenced an undefined name ``agent``
    (a guaranteed NameError at call time); the module only imports
    ``model`` and ``agent0``, so ``agent0.Agent`` is used here --
    confirm against the mutualModelling package API.
    """
    percepts = ["reward","noise"]
    actions = ["a","b","c","imitate"]
    rewards = [["reward",1.,1.],["punish",1.,-1],["noise",1.,0.1]]
    learner = agent0.Agent(name,all_names,percepts,actions,rewards)
    return learner
"""
"""
# parameters
name1 = "teacher"
name2 = "learner"
all_names = [name1,name2]
N = 100
n = 2000
CUMREW = np.zeros(n)
CUMREW2 = np.zeros(n)
L_curve1 = np.zeros(n-1)
L_curve2 = np.zeros(n-1)
T_curve1 = np.zeros(n-1)
T_curve2 = np.zeros(n-1)
def world_update(action1, action2):
    """Resolve one joint step of the teacher/learner world.

    Returns the percept dicts and action dicts each agent's models will
    observe, plus the scalar reward r (1 when the effective action is "b").
    """
    # The learner's effective action: "imitate" copies the teacher's move.
    effective = action2
    teacher_percepts = [(action2, 1.)]
    learner_percepts = [(action1, 1.)]
    if action2 == "imitate":
        effective = action1
        teacher_percepts.append((effective, 1.))

    r = 1 if effective == "b" else 0
    if effective != "b" and action2 == "c":
        # e.g. football match on TV
        learner_percepts.append(("noise", 1))

    # Each agent sees its own percepts, the other's, and a second-order
    # "other:self" view keyed as "<observer>:<observed>".
    model_percepts1 = {name1: teacher_percepts, name2: learner_percepts,
                       name2 + ":" + name1: teacher_percepts}
    model_percepts2 = {name2: learner_percepts, name1: teacher_percepts,
                       name1 + ":" + name2: learner_percepts}
    model_actions1 = {name2: action2, name2 + ":" + name1: action1}
    model_actions2 = {name1: action1, name1 + ":" + name2: action2}
    return model_percepts1, model_percepts2, model_actions1, model_actions2, r
case = "MM1"
for i in range(N):
if i>N/2.:
case="MM2"
if i%10==0:
print i
teacher = create_teacher(name1,all_names)
learner = create_learner(name2,all_names)
cumrew = []
model_percepts1 = None
model_percepts2 = None
model_actions1 = None
model_actions2 = None
action1 = ""
action2 = ""
previous = []
for j in range(n):
if j>n/2.:
learner.M["learner"].set_rewards([["reward",1,-1.],["punish",1,1.]])
action1 = teacher.update_models(None,model_percepts1,model_actions1,case)
action2 = learner.update_models(None,model_percepts2,model_actions2,case)
model_percepts1,model_percepts2,model_actions1,model_actions2,r = world_update(action1,action2)
cumrew.append(r)
if i>N/2.:
CUMREW+=2*(np.arange(n) - np.cumsum(np.array(cumrew)))/float(N)
L_curve1 += np.array(learner.social_curve)/float(N)
T_curve1 += np.array(teacher.social_curve)/float(N)
else:
CUMREW2+=2*(np.arange(n) - np.cumsum(np.array(cumrew)))/float(N)
L_curve2 += np.array(learner.social_curve)/float(N)
T_curve2 += np.array(teacher.social_curve)/float(N)
print 'teacher think about learner:'
teacher.show_learned_rewards('learner')
print ' learner think about teacher:learner '
learner.show_learned_rewards('teacher:learner')
print 'actual learner'
learner.show_learned_rewards('learner')
print "================================="
print ' learner think about teacher:'
learner.show_learned_rewards('teacher')
print ' teacher think about learner:teacher '
teacher.show_learned_rewards('learner:teacher')
print 'actual teacher:'
teacher.show_learned_rewards('teacher')
teacher.show_social_error('learner')
plt.subplot(3, 1, 1)
plt.plot(CUMREW,'b')
plt.plot(CUMREW2,'r')
plt.subplot(3, 1, 2)
plt.plot(L_curve1,'b')
plt.plot(L_curve2,'r')
plt.subplot(3, 1, 3)
plt.plot(T_curve1,'b')
plt.plot(T_curve2,'r')
plt.show()
| isc |
luo66/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
# NOTE(review): scipy.misc.lena() was removed in SciPy >= 0.17; on newer
# SciPy a replacement image (e.g. scipy.misc.ascent / scipy.datasets) is
# needed — confirm the target SciPy version before running.
lena = sp.misc.lena()
# Downsample the image by a factor of 4 (sum each 2x2 pixel block).
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# One sample per pixel, a single (intensity) feature.
X = np.reshape(lena, (-1, 1))

###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)

###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15  # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
                               linkage='ward',
                               connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)

###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    # Outline each segmented region.  The original code passed
    # ``contours=1``, which is not a valid Axes.contour keyword and raises
    # a TypeError on modern matplotlib; the boolean mask already produces a
    # single boundary contour, so the argument is dropped.
    # NOTE(review): plt.cm.spectral was renamed nipy_spectral in
    # matplotlib 2.2 — confirm the target matplotlib version.
    plt.contour(label == l,
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
FelixPM/Learning-Machine-Learning | sentdex/best_fit_slope.py | 1 | 2180 | """Linear Regression code
Find slope and intercept
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import random
style.use('fivethirtyeight')
def create_dataset(how_many_points, variance, step=2, correlation='pos'):
    """Build a noisy 1-D dataset for regression experiments.

    Each y value is a trend value plus uniform integer noise drawn from
    ``random.randrange(-variance, variance)``.  The trend moves by ``step``
    upward for ``correlation='pos'``, downward for ``'neg'``, and stays
    flat otherwise.

    :param how_many_points: number of samples to generate
    :param variance: half-width of the uniform integer noise
    :param step: trend increment between consecutive points
    :param correlation: 'pos', 'neg', or anything else for no trend
    :return: (xs, ys) as float64 numpy arrays, xs = 0..n-1
    """
    trend = 1
    ys = []
    for _ in range(how_many_points):
        # exactly one noise draw per point, in order (keeps the RNG
        # stream identical across implementations)
        ys.append(trend + random.randrange(-variance, variance))
        if correlation == 'pos':
            trend += step
        elif correlation == 'neg':
            trend -= step
    xs = list(range(len(ys)))
    return np.array(xs, dtype=np.float64), np.array(ys, dtype=np.float64)
def best_fit_slope_and_intercept(xs, ys):
    """Least-squares slope and intercept for the points (xs, ys).

    :param xs: x points
    :param ys: y points
    :return: (slope, intercept)
    """
    x_bar = np.mean(xs)
    y_bar = np.mean(ys)
    # slope = cov(x, y) / var(x), written via the mean identities
    slope = ((x_bar * y_bar) - np.mean(xs * ys)) / \
            ((x_bar * x_bar) - np.mean(xs * xs))
    # the best-fit line passes through the centroid (x_bar, y_bar)
    intercept = y_bar - slope * x_bar
    return slope, intercept
def squared_error(y_orig, y_line):
    """Sum of squared residuals between the fitted line and the data.

    :param y_orig: original y points
    :param y_line: fitted y points
    :return: total squared error
    """
    residuals = y_line - y_orig
    return sum(residuals ** 2)
def coefficient_of_determination(y_orig, y_line):
    """Return R**2 = 1 - SS_res / SS_tot for the fitted line.

    :param y_orig: original y points
    :param y_line: fitted y points
    :return: coefficient of determination
    """
    y_bar = np.mean(y_orig)
    # residual sum of squares vs the fitted line
    ss_res = sum((y_line - y_orig) ** 2)
    # total sum of squares vs the constant mean predictor
    ss_tot = sum((y_bar - y_orig) ** 2)
    return 1 - (ss_res / ss_tot)
# Points
# x = np.array([1, 2, 3, 4, 5, 6])
# y = np.array([5, 4, 6, 5, 6, 7])
# 40 noisy points with an upward trend (step 2, noise half-width 10).
x, y = create_dataset(40, 10, 2, 'pos')
# Fit and predict
m, b = best_fit_slope_and_intercept(x, y)
regression_line = np.array([(m * x1) + b for x1 in x])
# Extrapolate one step past the data (x runs 0..39).
predict_x = 45
predict_y = m * predict_x + b
r_squared = coefficient_of_determination(y, regression_line)
print(r_squared)
# Plot
plt.scatter(x, y)
plt.scatter(predict_x, predict_y, color='g')
plt.plot(x, regression_line)
plt.show()
| mit |
bavardage/statsmodels | statsmodels/sandbox/multilinear.py | 3 | 14059 | """Analyze a set of multiple variables with a linear models
multiOLS:
take a model and test it on a series of variables defined over a
pandas dataset, returning a summary for each variable
multigroup:
take a boolean vector and the definition of several groups of variables
and test if the group has a fraction of true values higher than the
rest. It allows to test if the variables in the group are significantly
more significant than outside the group.
"""
from patsy import dmatrix
import pandas as pd
from statsmodels.api import OLS
from statsmodels.api import stats
import numpy as np
import logging
def _model2dataframe(model_endog, model_exog, model_type=OLS, **kwargs):
    """Fit one linear model and flatten its summary into a pandas Series.

    Any extra keyword arguments are forwarded to the model constructor.
    """
    # build and fit the requested model
    fitted = model_type(model_endog, model_exog, **kwargs).fit()
    # global goodness-of-fit statistics of the whole model
    fit_stats = pd.Series({'r2': fitted.rsquared,
                           'adj_r2': fitted.rsquared_adj})
    # one row per regressor: estimate, p-value and standard error,
    # plus the global statistics column
    per_term = pd.DataFrame({'params': fitted.params,
                             'pvals': fitted.pvalues,
                             'std': fitted.bse,
                             'statistics': fit_stats})
    # overall F statistic and its p-value on a dedicated '_f_test' row
    overall = pd.DataFrame({'params': {'_f_test': fitted.fvalue},
                            'pvals': {'_f_test': fitted.f_pvalue}})
    # merge and unstack into a hierarchically indexed series
    flattened = pd.concat([per_term, overall]).unstack()
    return flattened.dropna()
def multiOLS(model, dataframe, column_list=None, method='fdr_bh',
             alpha=0.05, subset=None, model_type=OLS, **kwargs):
    """apply a linear model to several endogenous variables on a dataframe

    Take a linear model definition via formula and a dataframe that will be
    the environment of the model, and apply the linear model to a subset
    (or all) of the columns of the dataframe. It will return a dataframe
    with part of the information from the linear model summary.

    Parameters
    ----------
    model : string
        formula description of the model
    dataframe : pandas.dataframe
        dataframe where the model will be evaluated
    column_list : list of strings, optional
        Names of the columns to analyze with the model.
        If None (Default) it will perform the function on all the
        eligible columns (numerical type and not in the model definition)
    model_type : model class, optional
        The type of model to be used. The default is the linear model.
        Can be any linear model (OLS, WLS, GLS, etc..)
    method : string, optional
        the method used to perform the pvalue correction for multiple testing.
        default is the Benjamini/Hochberg, other available methods are:

            `bonferroni` : one-step correction
            `sidak` : one-step correction
            `holm-sidak` :
            `holm` :
            `simes-hochberg` :
            `hommel` :
            `fdr_bh` : Benjamini/Hochberg
            `fdr_by` : Benjamini/Yekutieli

    alpha : float, optional
        the significance level used for the pvalue correction (default 0.05)
    subset : boolean array
        the selected rows to be used in the regression

    all the other parameters will be directed to the model creation.

    Returns
    -------
    summary : pandas.DataFrame
        a dataframe containing an extract from the summary of the model
        obtained for each columns. It will give the model overall f test
        result and p-value, and the regression value and standard deviation
        for each of the regressors. The DataFrame has a hierarchical column
        structure, divided as:

            - params: contains the parameters resulting from the models. Has
              an additional column named _f_test containing the result of the
              F test.
            - pval: the pvalue results of the models. Has the _f_test column
              for the significance of the whole test.
            - adj_pval: the corrected pvalues via the multitest function.
            - std: uncertainties of the model parameters
            - statistics: contains the r squared statistics and the adjusted
              r squared.

    Notes
    -----
    The main application of this function is on system biology to perform
    a linear model testing of a lot of different parameters, like the
    different genetic expression of several genes.

    See Also
    --------
    statsmodels.stats.multitest
        contains several functions to perform the multiple p-value correction

    Examples
    --------
    Using the longley data as dataframe example

    >>> import statsmodels.api as sm
    >>> data = sm.datasets.longley.load_pandas()
    >>> df = data.exog
    >>> df['TOTEMP'] = data.endog

    This will perform the specified linear model on all the
    other columns of the dataframe
    >>> multiOLS('GNP + 1', df)

    This select only a certain subset of the columns
    >>> multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])

    It is possible to specify a transformation also on the target column,
    conforming to the patsy formula specification
    >>> multiOLS('GNP + 0', df, ['I(GNPDEFL**2)', 'center(TOTEMP)'])

    It is possible to specify the subset of the dataframe
    on which perform the analysis
    >> multiOLS('GNP + 1', df, subset=df.GNPDEFL > 90)

    Even a single column name can be given without enclosing it in a list
    >>> multiOLS('GNP + 0', df, 'GNPDEFL')
    """
    # data normalization
    # if None take all the numerical columns that aren't present in the model
    # it's not waterproof but is a good enough criterion for everyday use
    if column_list is None:
        column_list = [name for name in dataframe.columns
                       if dataframe[name].dtype != object and name not in model]
    # if it's a single string transform it in a single element list
    # NOTE(review): `basestring` exists only on Python 2; this module
    # targets Python 2 (use `str` under Python 3).
    if isinstance(column_list, basestring):
        column_list = [column_list]
    # NOTE(review): DataFrame.ix was removed in modern pandas (use
    # .loc/.iloc); kept as-is for the pandas version this file targets.
    if subset is not None:
        dataframe = dataframe.ix[subset]
    # perform each model and retrieve the statistics
    col_results = {}
    # as the model will use always the same endogenous variables
    # we can create them once and reuse
    model_exog = dmatrix(model, data=dataframe, return_type="dataframe")
    for col_name in column_list:
        # it will try to interpret the column name as a valid dataframe
        # index as it can be several times faster. If it fails it
        # interpret it as a patsy formula (for example for centering)
        try:
            model_endog = dataframe[col_name]
        except KeyError:
            model_endog = dmatrix(col_name + ' + 0', data=dataframe)
        # retrieve the result and store them
        res = _model2dataframe(model_endog, model_exog, model_type, **kwargs)
        col_results[col_name] = res
    # merge them together and sort by the overall p-value
    summary = pd.DataFrame(col_results)
    # order by the p-value: the most useful model first!
    # NOTE(review): DataFrame.sort was removed in pandas >= 0.20 (use
    # sort_values); kept as-is for the pandas version this file targets.
    summary = summary.T.sort([('pvals', '_f_test')])
    summary.index.name = 'endogenous vars'
    # implementing the pvalue correction method
    smt = stats.multipletests
    for (key1, key2) in summary:
        if key1 != 'pvals':
            continue
        p_values = summary[key1, key2]
        corrected = smt(p_values, method=method, alpha=alpha)[1]
        # extend the dataframe of results with the column
        # of the corrected p_values
        summary['adj_' + key1, key2] = corrected
    return summary
def _test_group(pvalues, group_name, group, exact=True):
"""test if the objects in the group are different from the general set.
The test is performed on the pvalues set (ad a pandas series) over
the group specified via a fisher exact test.
"""
from scipy.stats import fisher_exact
try:
from scipy.stats import chi2_contingency
except ImportError:
def chi2_contingency(*args, **kwds):
raise ValueError('exact=False is not available with old scipy')
totals = 1.0 * len(pvalues)
total_significant = 1.0 * np.sum(pvalues)
cross_index = [c for c in group if c in pvalues.index]
missing = [c for c in group if c not in pvalues.index]
if missing:
s = ('the test is not well defined if the group '
'has elements not presents in the significativity '
'array. group name: {}, missing elements: {}')
logging.warning(s.format(group_name, missing))
# how many are significant and not in the group
group_total = 1.0 * len(cross_index)
group_sign = 1.0 * len([c for c in cross_index if pvalues[c]])
group_nonsign = 1.0 * (group_total - group_sign)
# how many are significant and not outside the group
extern_sign = 1.0 * (total_significant - group_sign)
extern_nonsign = 1.0 * (totals - total_significant - group_nonsign)
# make the fisher test or the chi squared
test = fisher_exact if exact else chi2_contingency
table = [[extern_nonsign, extern_sign], [group_nonsign, group_sign]]
pvalue = test(np.array(table))[1]
# is the group more represented or less?
part = group_sign, group_nonsign, extern_sign, extern_nonsign
#increase = (group_sign / group_total) > (total_significant / totals)
increase = np.log((totals * group_sign)
/ (total_significant * group_total))
return pvalue, increase, part
def multigroup(pvals, groups, exact=True, keep_all=True, alpha=0.05):
    """Test if the given groups are different from the total partition.

    Given a boolean array test if each group has a proportion of positives
    different than the overall proportion.
    The test can be done as an exact Fisher test or approximated as a
    Chi squared test for more speed.

    Parameters
    ----------
    pvals : pandas series of boolean
        the significance of the variables under analysis
    groups : dict of list
        the name of each category of variables under exam.
        each one is a list of the variables included
    exact : boolean, optional
        If True (default) use the fisher exact test, otherwise
        use the chi squared test for contingencies tables.
        For high number of elements in the array the fisher test can
        be significantly slower than the chi squared.
    keep_all : boolean, optional
        if False it will drop those groups where the fraction
        of positive is below the expected result. If True (default)
        it will keep all the significant results.
    alpha : float, optional
        the significance level for the pvalue correction
        on the whole set of groups (not inside the groups themselves).

    Returns
    -------
    result_df : pandas dataframe
        for each group returns:

            pvals - the fisher p value of the test
            adj_pvals - the adjusted pvals
            increase - the log of the odd ratio between the
                internal significant ratio versus the external one
            _in_sign - significant elements inside the group
            _in_non - non significant elements inside the group
            _out_sign - significant elements outside the group
            _out_non - non significant elements outside the group

    Notes
    -----
    This test allows to see if a category of variables is generally better
    suited to be described by the model. For example to see if a predictor
    gives more information on demographic or economical parameters,
    by creating two groups containing the endogenous variables of each
    category.

    This function is conceived for medical datasets with a lot of variables
    that can be easily grouped into functional groups. This is because
    the significance of a group requires a rather large number of
    composing elements.

    Examples
    --------
    A toy example on a real dataset, the Guerry dataset from R

    >>> url = "http://vincentarelbundock.github.com/"
    >>> url = url + "Rdatasets/csv/HistData/Guerry.csv"
    >>> df = pd.read_csv(url, index_col='dept')

    evaluate the relationship between the various parameters with the Wealth

    >>> pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']

    define the groups

    >>> groups = {}
    >>> groups['crime'] = ['Crime_prop', 'Infanticide',
    ...     'Crime_parents', 'Desertion', 'Crime_pers']
    >>> groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
    >>> groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']

    do the analysis of the significance

    >>> multigroup(pvals < 0.05, groups)
    """
    # the series must be strictly boolean: reject anything else
    pvals = pd.Series(pvals)
    if not (set(pvals.unique()) <= set([False, True])):
        raise ValueError("the series should be binary")
    # duplicated labels would make the per-group lookups ambiguous
    if hasattr(pvals.index, 'is_unique') and not pvals.index.is_unique:
        raise ValueError("series with duplicated index is not accepted")
    # one dict per output column, keyed by group name
    results = {'pvals': {},
               'increase': {},
               '_in_sign': {},
               '_in_non': {},
               '_out_sign': {},
               '_out_non': {}}
    # NOTE(review): dict.iteritems is Python-2-only (use .items() on
    # Python 3); this module targets Python 2.
    for group_name, group_list in groups.iteritems():
        res = _test_group(pvals, group_name, group_list, exact)
        results['pvals'][group_name] = res[0]
        results['increase'][group_name] = res[1]
        results['_in_sign'][group_name] = res[2][0]
        results['_in_non'][group_name] = res[2][1]
        results['_out_sign'][group_name] = res[2][2]
        results['_out_non'][group_name] = res[2][3]
    # NOTE(review): DataFrame.sort was removed in pandas >= 0.20 (use
    # sort_values); kept as-is for the pandas version this file targets.
    result_df = pd.DataFrame(results).sort('pvals')
    # optionally drop the groups that are under-represented
    # (negative log odds ratio)
    if not keep_all:
        result_df = result_df[result_df.increase]
    # correct the p-values across the whole set of groups
    smt = stats.multipletests
    corrected = smt(result_df['pvals'], method='fdr_bh', alpha=alpha)[1]
    result_df['adj_pvals'] = corrected
    return result_df
| bsd-3-clause |
JosmanPS/scikit-learn | sklearn/linear_model/coordinate_descent.py | 43 | 75144 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication

    y : ndarray, shape (n_samples,)
        Target values

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed.

    l1_ratio : float
        The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
        l1_ratio = 1`` it is an L1 penalty.  For ``0 < l1_ratio <
        1``, the penalty is a combination of L1 and L2.

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    fit_intercept : boolean, default True
        Whether to fit an intercept or not

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Returns
    -------
    alphas : ndarray, shape (n_alphas,)
        Geometrically spaced alpha values, in decreasing order, from
        alpha_max down to alpha_max * eps.
    """
    n_samples = len(y)

    sparse_center = False
    if Xy is None:
        X_sparse = sparse.isspmatrix(X)
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = check_array(X, 'csc',
                        copy=(copy_X and fit_intercept and not X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = center_data(X, y, fit_intercept,
                                        normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)

        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
                                                        normalize)
            mean_dot = X_mean * np.sum(y)

    if Xy.ndim == 1:
        Xy = Xy[:, np.newaxis]

    if sparse_center:
        # apply the centering/normalization to the precomputed Gram vector
        # instead of to the sparse X itself
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]
        if normalize:
            Xy /= X_std[:, np.newaxis]

    # smallest alpha for which all coefficients are zero:
    # max over features of ||X^T y|| / (n_samples * l1_ratio)
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))

    if alpha_max <= np.finfo(float).resolution:
        # degenerate case (e.g. y orthogonal to X): return a flat grid at
        # machine resolution instead of taking log10 of ~0
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas

    # geometric grid from alpha_max down to alpha_max * eps
    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    """Compute Lasso path with coordinate descent

    The Lasso optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
        Target values

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    return_n_iter : bool
        whether to return the number of iterations or not.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path

    Examples
    ---------

    Comparing lasso_path and lars_path with interpolation:

    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]

    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # Delegate to enet_path with l1_ratio=1 (pure L1 penalty).
    # BUGFIX: `return_n_iter` was accepted but never forwarded, so callers
    # asking for the iteration counts silently got the 3-tuple instead of
    # the documented 4-tuple.  Forward it explicitly.
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter,
                     **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False, **params):
    """Compute elastic net path with coordinate descent

    The elastic net optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values

    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso

    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    return_n_iter : bool
        whether to return the number of iterations or not.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # We expect X and y to be already float64 Fortran ordered when bypassing
    # checks
    check_input = 'check_input' not in params or params['check_input']
    # BUGFIX: this previously read ``'check_input' not in params or
    # params['pre_fit']``, which keyed the default on the wrong parameter:
    # passing pre_fit=False alone was silently ignored, and passing
    # check_input without pre_fit raised a KeyError.
    pre_fit = 'pre_fit' not in params or params['pre_fit']
    if check_input:
        X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
        y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
                        ensure_2d=False)
        if Xy is not None:
            Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
                             copy=False,
                             ensure_2d=False)
    n_samples, n_features = X.shape

    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape

    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_mean' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_mean'] / params['X_std']
        else:
            X_sparse_scaling = np.zeros(n_features)

    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if pre_fit:
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False,
                     copy=False, Xy_precompute_order='F')
    if alphas is None:
        # No need to normalize of fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered

    n_alphas = len(alphas)
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []

    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')

    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=np.float64)

    if coef_init is None:
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
    else:
        coef_ = np.asfortranarray(coef_init)

    # Fit one model per alpha, warm-starting each from the previous
    # solution (coef_ carries over across iterations).
    for i, alpha in enumerate(alphas):
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        # Dispatch to the appropriate Cython coordinate-descent kernel:
        # sparse single-output, dense multi-task, precomputed-Gram, or
        # plain dense single-output.
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, 'csc', dtype=np.float64,
                                         order='F')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like")
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        # a dual gap above the solver's scaled tolerance means max_iter was
        # exhausted before convergence
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations',
                          ConvergenceWarning)
        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')

    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
    def fit(self, X, y, check_input=True):
        """Fit model with coordinate descent.

        Parameters
        ----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target
        check_input : bool, default True
            When False, X and y are assumed to already be validated
            float64, Fortran-ordered arrays and all checking is skipped
            (used internally by the path/CV machinery to avoid re-checks).

        Returns
        -------
        self : the fitted estimator (allows chaining fit and predict).

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        if self.precompute == 'auto':
            warnings.warn("Setting precompute to 'auto', was found to be "
                          "slower even when n_samples > n_features. Hence "
                          "it will be removed in 0.18.",
                          DeprecationWarning, stacklevel=2)
        # We expect X and y to be already float64 Fortran ordered arrays
        # when bypassing checks
        if check_input:
            X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
                             order='F',
                             copy=self.copy_X and self.fit_intercept,
                             multi_output=True, y_numeric=True)
        # Center/scale per fit_intercept/normalize and optionally build the
        # Gram matrix; Xy is non-None only when a precomputed Gram is used.
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=False, Xy_precompute_order='F')
        # Work with 2-D y internally so single- and multi-target inputs
        # share one code path; squeezed back at the end.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_samples, n_features = X.shape
        n_targets = y.shape[1]
        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")
        # Warm start: reuse previous coefficients when requested and present.
        if not self.warm_start or self.coef_ is None:
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]
        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
        self.n_iter_ = []
        # Run one single-alpha coordinate-descent path per target column.
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_mean=X_mean, X_std=X_std, return_n_iter=True,
                          coef_init=coef_[k], max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection,
                          check_input=False,
                          pre_fit=False)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])
        # For a single target expose scalars / 1-D arrays instead of 2-D.
        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]
        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_mean, y_mean, X_std)
        # return self for chaining fit and predict calls
        return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
        WARNING : The ``'auto'`` option is deprecated and will
        be removed in 0.18.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit
        as initialization, otherwise, just erase the previous solution.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    n_iter_ : int | array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Same path function as ElasticNet: the Lasso is the l1_ratio=1.0
    # special case, enforced in __init__ below.
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Delegate to ElasticNet with l1_ratio pinned to 1.0 (pure L1).
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive, random_state=random_state,
            selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values
    train : list of indices
        The indices of the train set
    test : list of indices
        The indices of the test set
    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature
    path_params : dictionary
        Parameters passed to the path function
    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2
    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies
    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies

    Returns
    -------
    this_mses : ndarray, shape (n_alphas,)
        Mean squared error on the test fold for each alpha along the path
        (averaged over outputs in the multi-output case).
    """
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False
    # Center/scale the training fold only; the (mean, std) statistics are
    # reused below to correct the test-fold predictions.
    X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)
    # Copy before mutating: path_params is shared across folds/jobs.
    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_mean'] = X_mean
    path_params['X_std'] = X_std
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    # Training copies are no longer needed; free them before scoring.
    del X_train, y_train
    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_mean = np.atleast_1d(y_mean)
        y_test = y_test[:, np.newaxis]
    if normalize:
        # Undo the per-column scaling so coefficients apply to raw X_test.
        nonzeros = np.flatnonzero(X_std)
        coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
    intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    # residual = prediction - y = (X @ coef + intercept) - y
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path

    Subclasses (LassoCV, ElasticNetCV and the multi-task variants) only
    provide ``path`` and their ``__init__``; the cross-validated model
    selection logic lives in ``fit`` below.
    """
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # Hyper-parameters are stored verbatim (scikit-learn convention);
        # validation is deferred to fit.
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit linear model with coordinate descent

        Fit is on grid of alphas and best alpha estimated by cross-validation.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values

        Returns
        -------
        self : the fitted estimator, with ``alpha_`` (and ``l1_ratio_`` on
            the ElasticNet variants) set to the CV-selected values and the
            final model refit on the whole data.
        """
        y = np.asarray(y, dtype=np.float64)
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        # The presence of an l1_ratio attribute distinguishes the
        # ElasticNet-flavoured subclasses from the Lasso-flavoured ones;
        # it also selects the model used for the final refit below.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
            y = column_or_1d(y, warn=True)
        else:
            # Multi-task case: sparse X is unsupported and y must be 2-D.
            if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()
        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                        not np.may_share_memory(reference_to_old_X.data, X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
            copy_X = False
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # Build one automatic alpha grid per l1_ratio value.
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv, X)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=np.float64)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        # Pick the (l1_ratio, alpha) pair with the smallest mean CV MSE.
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.precompute = False
        model.fit(X, y)
        # Lasso-type subclasses expose no l1_ratio_ attribute.
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    eps : float, optional
        Path length: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        How many alphas to place along the regularization path.
    alphas : numpy array, optional
        Explicit list of alphas at which to compute the models; when
        ``None`` the grid is chosen automatically.
    precompute : True | False | 'auto' | array-like
        Whether a precomputed Gram matrix should be used to speed up
        calculations. With ``'auto'`` the choice is made internally; a
        Gram matrix may also be passed directly.
    max_iter : int, optional
        Upper bound on the number of coordinate-descent iterations.
    tol : float, optional
        Optimization tolerance: once the updates fall below ``tol`` the
        dual gap is checked, and iteration continues until the gap is
        smaller than ``tol`` as well.
    cv : integer or cross-validation generator, optional
        Number of folds (default 3) or a specific cross-validation
        object; see :mod:`sklearn.cross_validation` for the available
        generators.
    verbose : bool or integer
        Verbosity level.
    n_jobs : integer, optional
        Number of CPUs used during the cross validation; ``-1`` means
        all of them.
    positive : bool, optional
        If positive, restrict regression coefficients to be positive
    selection : str, default 'cyclic'
        With 'random' a random coefficient is updated at every iteration
        instead of cycling over the features in order, which is often
        significantly faster when ``tol`` is above 1e-4.
    random_state : int, RandomState instance, or None (default)
        Seed of the pseudo random number generator that picks the
        feature to update; relevant only when ``selection`` is 'random'.
    fit_intercept : boolean, default True
        Whether to estimate an intercept. When False the data is
        expected to be centered already.
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting
    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # The only Lasso-specific piece: the path function used by
    # LinearModelCV.fit.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # Every argument is forwarded verbatim to LinearModelCV.
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol,
            copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs,
            positive=positive, random_state=random_state,
            selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    l1_ratio : float, optional
        Mixing parameter between 0 and 1 passed to ElasticNet. With
        ``l1_ratio = 0`` the penalty is pure L2, with ``l1_ratio = 1`` it
        is pure L1, and intermediate values mix the two. A list of values
        may also be supplied, in which case cross-validation picks the
        best one; a good list puts more values close to 1 (Lasso-like)
        than close to 0 (Ridge-like), e.g. ``[.1, .5, .7, .9, .95, .99, 1]``.
    eps : float, optional
        Path length: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path, used for each
        l1_ratio.
    alphas : numpy array, optional
        Explicit alphas at which to compute the models; chosen
        automatically when None.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. ``'auto'`` leaves the decision to the
        implementation; a Gram matrix can also be passed directly.
    max_iter : int, optional
        Upper bound on the number of coordinate-descent iterations.
    tol : float, optional
        Optimization tolerance: once the updates fall below ``tol`` the
        dual gap is checked, and iteration continues until the gap is
        smaller than ``tol`` as well.
    cv : integer or cross-validation generator, optional
        Number of folds (default 3) or a specific cross-validation
        object; see :mod:`sklearn.cross_validation` for the available
        generators.
    verbose : bool or integer
        Verbosity level.
    n_jobs : integer, optional
        Number of CPUs used during the cross validation; ``-1`` means
        all of them.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    selection : str, default 'cyclic'
        With 'random' a random coefficient is updated at every iteration
        instead of cycling over the features in order, which is often
        significantly faster when ``tol`` is above 1e-4.
    random_state : int, RandomState instance, or None (default)
        Seed of the pseudo random number generator that picks the
        feature to update; relevant only when ``selection`` is 'random'.
    fit_intercept : boolean
        Whether to estimate an intercept. When False the data is
        expected to be centered already.
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation
    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula),
    intercept_ : float | array, shape (n_targets, n_features)
        Independent term in the decision function.
    mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.
    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet
    """
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False, random_state=None,
                 selection='cyclic'):
        # Store the constructor arguments untouched (scikit-learn
        # convention); all validation happens in LinearModelCV.fit.
        for attr, val in (('l1_ratio', l1_ratio), ('eps', eps),
                          ('n_alphas', n_alphas), ('alphas', alphas),
                          ('fit_intercept', fit_intercept),
                          ('normalize', normalize),
                          ('precompute', precompute),
                          ('max_iter', max_iter), ('tol', tol),
                          ('cv', cv), ('copy_X', copy_X),
                          ('verbose', verbose), ('n_jobs', n_jobs),
                          ('positive', positive),
                          ('random_state', random_state),
                          ('selection', selection)):
            setattr(self, attr, val)
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For ``l1_ratio = 1`` the penalty is an L1/L2 penalty. For
        ``l1_ratio = 0`` it is an L2 penalty (see the objective above).
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is \
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
            selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524  0.45612256]
     [ 0.45663524  0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422  0.0872422]

    See also
    --------
    ElasticNet, MultiTaskLasso

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        # scikit-learn convention: only store hyper-parameters here;
        # validation and computation are deferred to fit.
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples, n_tasks)
            Target

        Returns
        -------
        self : the fitted estimator.

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = check_array(X, dtype=np.float64, order='F',
                        copy=self.copy_X and self.fit_intercept)
        y = np.asarray(y, dtype=np.float64)
        # MultiTaskLasso inherits this method; select the correct class
        # name for the error message below.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)
        n_samples, n_features = X.shape
        _, n_tasks = y.shape
        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))
        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)
        # Warm start: reuse previous coefficients when requested and present.
        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                  order='F')
        # Split the penalty into its L1/L2 (l1_reg) and squared-Frobenius
        # (l2_reg) parts, pre-scaled by n_samples as the solver expects.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory
        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')
        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)
        self._set_intercept(X_mean, y_mean, X_std)
        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')
        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    """Multi-task Lasso model trained with an L1/L2 mixed-norm regularizer.

    The optimization objective is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    where ``||W||_21 = sum_i sqrt(sum_j w_ij^2)``, i.e. the sum of the
    Euclidean norms of each row of ``W``.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to
        False, no intercept is used in calculations (e.g. data is
        expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations.
    tol : float, optional
        Tolerance for the optimization: when the updates are smaller than
        ``tol``, the optimization code checks the dual gap for optimality
        and continues until it is smaller than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to
        fit as initialization; otherwise, just erase the previous solution.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every
        iteration rather than looping over features sequentially. This
        often leads to significantly faster convergence, especially when
        tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        Seed of the pseudo random number generator that selects a random
        feature to update. Useful only when ``selection == 'random'``.

    Attributes
    ----------
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    intercept_ : array, shape (n_tasks,)
        Independent term in the decision function.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to
        reach the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, random_state=None, selection='cyclic', tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.  To avoid
    unnecessary memory duplication, the X argument of the fit method
    should be passed directly as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        # Hyper-parameters are stored verbatim (sklearn convention:
        # no validation or conversion inside __init__).
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection
        # A lasso penalty is the elastic-net penalty with l1_ratio pinned
        # at 1.0; the inherited fit() reads this attribute.
        self.l1_ratio = 1.0
        # No previous solution yet: fit() starts from zeros unless
        # warm-starting from an earlier fit.
        self.coef_ = None
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    """Multi-task L1/L2 ElasticNet with built-in cross-validation.

    The optimization objective is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    where ``||W||_21 = sum_i sqrt(sum_j w_ij^2)``, i.e. the sum of the
    Euclidean norms of each row of ``W``.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.  For
        l1_ratio = 1 the penalty is a pure L1/L2 penalty; for
        ``0 < l1_ratio < 1`` it is a combination of L1/L2 and L2
        (Frobenius) penalties.
    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path.
    alphas : array-like, optional
        List of alphas where to compute the models; set automatically
        when not provided.
    fit_intercept : boolean
        Whether to calculate the intercept for this model.  If set to
        False, no intercept is used (data is expected to be already
        centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    max_iter : int, optional
        The maximum number of iterations.
    tol : float, optional
        Tolerance for the optimization: when the updates are smaller than
        ``tol``, the optimization code checks the dual gap for optimality
        and continues until it is smaller than ``tol``.
    cv : integer or cross-validation generator, optional
        Number of folds (default 3), or a cross-validation object; see
        the :mod:`sklearn.cross_validation` module for possible objects.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during cross validation (``-1`` means all).
        Only used when multiple values of l1_ratio are given.
    random_state : int, RandomState instance, or None (default)
        Seed of the pseudo random number generator that selects a random
        feature to update. Useful only when ``selection == 'random'``.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every
        iteration rather than looping over features sequentially. This
        often leads to significantly faster convergence, especially when
        tol is higher than 1e-4.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in the decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    alpha_ : float
        The amount of penalization chosen by cross validation.
    mse_path_ : array, shape (n_alphas, n_folds) or \
            (n_l1_ratio, n_alphas, n_folds)
        Mean square error on the test set of each fold, varying alpha.
    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.
    l1_ratio_ : float
        Best l1_ratio obtained by cross-validation.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to
        reach the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV()
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
           fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
           n_jobs=1, normalize=False, random_state=None, selection='cyclic',
           tol=0.0001, verbose=0)
    >>> print(clf.coef_)
    [[ 0.52875032  0.46958558]
     [ 0.52875032  0.46958558]]
    >>> print(clf.intercept_)
    [ 0.00166409  0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.  To avoid
    unnecessary memory duplication, the X argument of the fit method
    should be passed directly as a Fortran-contiguous numpy array.
    """
    # Regularization-path function used by the LinearModelCV machinery.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
        # Hyper-parameters are stored verbatim (sklearn convention:
        # no validation or conversion inside __init__).
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    """Multi-task L1/L2 Lasso with built-in cross-validation.

    The optimization objective is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    where ``||W||_21 = sum_i sqrt(sum_j w_ij^2)``, i.e. the sum of the
    Euclidean norms of each row of ``W``.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path: ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path.
    alphas : array-like, optional
        List of alphas where to compute the models; set automatically
        when not provided.
    fit_intercept : boolean
        Whether to calculate the intercept for this model.  If set to
        False, no intercept is used (data is expected to be already
        centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    max_iter : int, optional
        The maximum number of iterations.
    tol : float, optional
        Tolerance for the optimization: when the updates are smaller than
        ``tol``, the optimization code checks the dual gap for optimality
        and continues until it is smaller than ``tol``.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    cv : integer or cross-validation generator, optional
        Number of folds (default 3), or a cross-validation object; see
        the :mod:`sklearn.cross_validation` module for possible objects.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during cross validation (``-1`` means all).
    random_state : int, RandomState instance, or None (default)
        Seed of the pseudo random number generator that selects a random
        feature to update. Useful only when ``selection == 'random'``.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every
        iteration rather than looping over features sequentially. This
        often leads to significantly faster convergence, especially when
        tol is higher than 1e-4.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in the decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    alpha_ : float
        The amount of penalization chosen by cross validation.
    mse_path_ : array, shape (n_alphas, n_folds)
        Mean square error on the test set of each fold, varying alpha.
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.
    n_iter_ : int
        Number of iterations run by the coordinate descent solver to
        reach the specified tolerance for the optimal alpha.

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.  To avoid
    unnecessary memory duplication, the X argument of the fit method
    should be passed directly as a Fortran-contiguous numpy array.
    """
    # Regularization-path function used by the LinearModelCV machinery.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=1, random_state=None,
                 selection='cyclic'):
        # Delegate attribute storage to the shared LinearModelCV
        # initializer; no l1_ratio is exposed for a lasso path.
        super(MultiTaskLassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
            selection=selection)
| bsd-3-clause |
bucricket/projectMASpreprocess | preparepydisalexi/processLandsatLAI.py | 1 | 14392 | #!/usr/bin/env python2
#!/Applications/anaconda/envs/root3 python
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 1 13:50:04 2016
@author: mschull
"""
#python
from .search import Search
import os
import subprocess
import glob
import shutil
import pandas as pd
import datetime
import argparse
import getpass
import keyring
import json
from pyproj import Proj
from .utils import folders,search
from .Clients import Client
from .Order import Order
from .OrderTemplate import OrderTemplate
import pycurl
# Module-level working directories, resolved relative to the current
# working directory at import time.  Importing this module has the side
# effect of creating the Landsat scratch folder if it is missing.
base = os.getcwd()
Folders = folders(base)  # helper from .utils: builds/returns the directory layout
modisBase = Folders['modisBase']  # MODIS downloads land here
landsatSR = Folders['landsatSR']  # Landsat surface-reflectance scenes
landsatLAI = Folders['landsatLAI']  # computed LAI products
# Scratch area where the scenes currently being processed are staged.
landsatTemp = os.path.join(landsatSR,'temp')
if not os.path.exists(landsatTemp):
os.mkdir(landsatTemp)
def getLandsatData(loc, startDate, endDate, auth):
    """Order and download Landsat 8 surface-reflectance data from USGS ESPA.

    Parameters
    ----------
    loc : sequence of float
        (latitude, longitude) of the point of interest.
    startDate, endDate : str
        Search date range, formatted ``yyyy-mm-dd``.
    auth : tuple of str
        (username, password) for the USGS ESPA service.

    Side effects: writes ``order.json`` in the working directory, symlinks
    already-downloaded scenes into the Landsat temp folder, and downloads
    any newly ordered scenes.
    """
    # Products requested from ESPA for every ordered OLI/TIRS scene.
    data = {'olitirs8': {"inputs": [], "products": ["sr", "bt", "cloud"]},
            "format": "gtiff",
            "plot_statistics": False, "note": ""}
    with open('order.json', 'w') as outfile:
        json.dump(data, outfile)

    # Build the various order/client handlers to spec.
    template = OrderTemplate('template')
    template.load(path='./order.json')
    order = Order(template, note="Lat%dLon%d-%s_%s"
                  % (int(loc[0]), int(loc[1]), startDate, endDate))
    client = Client(auth)  # prompts for credentials if auth is not supplied

    # Find cloud-free Landsat scenes.  Fall back to the simpler
    # utils.search helper if the primary search service fails.
    # BUG FIX: this used a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit, defeating the Ctrl+C handler in the
    # __main__ guard; narrowed to ``except Exception``.
    try:
        s = Search()
        scenes = s.search(lat=loc[0], lon=loc[1], limit=100,
                          start_date=startDate, end_date=endDate,
                          cloud_max=5)
        l8_tiles = []
        for result in scenes['results']:
            path = result['path']
            row = result['row']
            sceneID = result['sceneID']
            if not sceneID.startswith('LC'):
                continue
            dataFN = os.path.join(landsatSR, "%s%s" % (path, row),
                                  "%s.xml" % sceneID)
            if not os.path.exists(dataFN):
                # Not on disk yet: add the scene to the ESPA order.
                l8_tiles.append(sceneID)
            else:
                # Already downloaded: stage the files into the temp folder.
                for file in glob.glob("%s*" % dataFN[:-4]):
                    dst = os.path.join(landsatTemp, file.split(os.sep)[-1])
                    if not os.path.exists(dst):  # tolerate re-runs
                        os.symlink(file, dst)
    except Exception:
        sceneIDs = search(loc[0], loc[1], startDate, endDate)
        l8_tiles = list(sceneIDs)
    # BUG FIX: was the Python-2-only statement ``print l8_tiles``; the
    # rest of this file uses the function form, which works on 2 and 3.
    print(l8_tiles)

    if l8_tiles:
        # Order the data.
        order.add_tiles("olitirs8", l8_tiles)
        response = order.submit(client)
        # The server's whole response might indicate an ordering error.
        print(response)
        # Assuming there were no order submission errors:
        orderid = response['orderid']
        # Start the downloader.  Each yielded item is a tuple of
        # (filepath, True-if-fresh-download); per-file pipeline steps can
        # be inserted in this loop (see the Client class documentation).
        for download in client.download_order_gen(orderid):
            print(download)
def getMODISlai(tiles, product, version, startDate, endDate, auth):
    """Download a MODIS product for the given tiles and date range.

    Thin wrapper around the external ``modis_download.py`` tool.
    ``auth`` is a (username, password) tuple for NASA EarthData.
    """
    user, password = auth[0], auth[1]
    cmd = ["modis_download.py",
           "-r",
           "-U", "%s" % user,
           "-P", "%s" % password,
           "-p", "%s.%s" % (product, version),
           "-t", "%s" % tiles,
           "-s", "MOTA",
           "-f", "%s" % startDate,
           "-e", "%s" % endDate,
           "%s" % modisBase]
    subprocess.call(cmd)
def latlon2MODtile(lat, lon):
    """Return the (V, H) MODIS sinusoidal-grid tile containing a point.

    Reference: https://code.env.duke.edu/projects/mget/wiki/SinusoidalMODIS

    Parameters
    ----------
    lat, lon : float
        Geographic coordinates in degrees.

    Returns
    -------
    (int, int)
        (vertical, horizontal) tile indices on the MODIS sinusoidal grid.
    """
    # Project the point onto the MODIS sinusoidal grid (meters).
    p_modis_grid = Proj('+proj=sinu +R=6371007.181 +nadgrids=@null +wktext')
    x, y = p_modis_grid(lon, lat)
    # BUG FIX: removed a dead inverse transform that recomputed lon/lat
    # from (x, y) and never used the result.
    # Grid constants: tile size and the grid origin used below.
    tileWidth = 1111950.5196666666
    ulx = -20015109.354
    uly = -10007554.677  # lower y edge of the grid; V counts down from 18
    H = (x - ulx) / tileWidth
    V = 18 - ((y - uly) / tileWidth)
    return int(V), int(H)
def geotiff2envi():
    """Convert the staged Landsat SR GeoTIFF bands to ENVI format.

    Runs the external ``GeoTiff2ENVI`` tool on every band of every scene
    staged in the Landsat temp folder (one ``*.xml`` file per scene).
    """
    geotiffConvert = 'GeoTiff2ENVI'
    # Output band names paired with the Landsat 8 band file suffixes.
    bands = ["blue", "green", "red", "nir", "swir1", "swir2", "cloud"]
    l8bands = ["sr_band2", "sr_band3", "sr_band4", "sr_band5",
               "sr_band6", "sr_band7", "cfmask"]
    landsatFiles = glob.glob(os.path.join(landsatTemp, "*.xml"))
    # BUG FIX: the original nested loops both bound the name ``i``; that
    # happened to work in Python but was an accident waiting to happen.
    # The loops now iterate the items directly.
    for landsatFile in landsatFiles:
        fstem = landsatFile[:-4]  # strip the ".xml" extension
        for band, l8band in zip(bands, l8bands):
            tifFile = fstem + "_%s.tif" % l8band
            datFile = fstem + "_%s.%s.dat" % (l8band, band)
            subprocess.call(["%s" % geotiffConvert, "%s" % tifFile,
                             "%s" % datFile])
def sample():
    """Generate MODIS-Landsat LAI sample files with ``lndlai_sample``.

    For each staged Landsat scene, find the MCD15A3 (4-day) MODIS
    composite at/before the scene date and run the external sampler,
    producing an ``SR_LAI.*.txt`` sample file in the LAI folder.
    """
    # Renamed from ``sample`` — the local previously shadowed this
    # function's own name.
    sampler = 'lndlai_sample'
    bands = ["blue", "green", "red", "nir", "swir1", "swir2", "cloud"]
    l8bands = ["sr_band2", "sr_band3", "sr_band4", "sr_band5",
               "sr_band6", "sr_band7", "cfmask"]
    landsatFiles = glob.glob(os.path.join(landsatTemp, "*.xml"))
    for landsatFile in landsatFiles:
        sceneID = landsatFile.split(os.sep)[-1][:-4]
        # Extract the Landsat acquisition day-of-year and year from the ID.
        ldoy = sceneID[13:16]
        year = int(sceneID[9:13])
        # Convert to a calendar date string.
        dd = datetime.datetime(year, 1, 1) + datetime.timedelta(int(ldoy) - 1)
        date = '%d-%02d-%02d' % (dd.year, dd.month, dd.day)
        # The 4-day MODIS composite day-of-year at/before the Landsat doy.
        mdoy = int((int((float(ldoy) - 1.) / 4.) * 4.) + 1)
        # BUG FIX: MODIS filenames zero-pad the day of year to three
        # digits (AYYYYDDD).  The old "%s" formatting dropped the padding,
        # so scenes earlier than doy 100 never matched any MODIS file.
        modFiles = glob.glob(os.path.join(
            modisBase, "MCD15A3.A%d%03d.*.hdf" % (year, mdoy)))
        fstem = landsatFile[:-4]
        laiPath = landsatLAI
        if not os.path.exists(laiPath):
            os.mkdir(laiPath)
        sam_file = os.path.join(laiPath, "SR_LAI.%s.%s.MCD15A3_A%d%03d.txt"
                                % (date, sceneID, year, mdoy))
        for k, modFile in enumerate(modFiles):
            # Write the control file consumed by lndlai_sample, run the
            # tool, then remove the control file.
            fn = os.path.join(laiPath, "slai%s.inp" % k)
            with open(fn, "w") as f:
                f.write("LANDSAT_BASE_BLUE = %s_%s.%s.dat\n" % (fstem, l8bands[0], bands[0]))
                f.write("LANDSAT_BASE_GREEN = %s_%s.%s.dat\n" % (fstem, l8bands[1], bands[1]))
                f.write("LANDSAT_BASE_RED = %s_%s.%s.dat\n" % (fstem, l8bands[2], bands[2]))
                f.write("LANDSAT_BASE_NIR = %s_%s.%s.dat\n" % (fstem, l8bands[3], bands[3]))
                f.write("LANDSAT_BASE_SWIR1 = %s_%s.%s.dat\n" % (fstem, l8bands[4], bands[4]))
                f.write("LANDSAT_BASE_SWIR2 = %s_%s.%s.dat\n" % (fstem, l8bands[5], bands[5]))
                f.write("LANDSAT_BASE_CLOUD = %s_%s.%s.dat\n" % (fstem, l8bands[6], bands[6]))
                f.write("MODIS_BASE_FILE = %s\n" % modFile)
                f.write("SAMPLE_FILE_OUT = %s\n" % sam_file)
                f.write("PURE_SAMPLE_TH = 0.2\n")
            subprocess.call(["%s" % sampler, "%s" % fn])
            os.remove(fn)
def train():
    """Train the cubist rule-based LAI model on the sample files.

    Combines all ``*.txt`` sample files in the LAI folder into a single
    cubist training set (a ``.data``/``.names`` pair) and invokes the
    external ``cubist`` tool to build the model.
    """
    cubist = 'cubist'
    columns = ['ulx', 'uly', 'blue', 'green', 'red', 'nir', 'swir1',
               'swir2', 'ndvi', 'ndwi', 'lai', 'weight', 'satFlag']
    sampleFiles = glob.glob(os.path.join(landsatLAI, "*.txt"))
    # ======combine input data======================================
    frames = [pd.read_csv(f, delim_whitespace=True, names=columns)
              for f in sampleFiles]
    if frames:
        df = pd.concat(frames, ignore_index=True)
    else:
        df = pd.DataFrame(columns=columns)
    # =====create filestem.data====================================
    df = df[(df.satFlag == 'N')]  # keep only unsaturated samples
    df = df.sort_values(by='weight')
    startDate = '200'
    endDate = '300'
    filestem = os.path.join(landsatLAI,
                            "lndsr_modlai_samples.combined_%s-%s"
                            % (startDate, endDate))
    # BUG FIX: filestem is already an absolute path; re-joining it onto
    # landsatLAI (as the old code did) only worked because os.path.join
    # discards its first argument when the second is absolute on POSIX.
    df.to_csv(filestem + ".data",
              columns=['blue', 'green', 'red', 'nir', 'swir1', 'swir2',
                       'ndvi', 'ndwi', 'lai', 'weight'],
              header=None, index=None, mode='w', sep="\t", encoding='utf-8')
    # ====create filestem.names====================================
    with open("%s.names" % filestem, "w") as f:
        f.write("lai.\n")
        f.write("B1: continuous\n")
        f.write("B2: continuous\n")
        f.write("B3: continuous\n")
        f.write("B4: continuous\n")
        f.write("B5: continuous\n")
        f.write("B7: continuous\n")
        f.write("ndvi: continuous\n")
        f.write("ndwi: continuous\n")
        f.write("lai: continuous\n")
        f.write("case weight: continuous\n")
        f.write("attributes excluded: B1, B2, B7, ndvi, ndwi\n")
    nrules = 5
    subprocess.call(["%s" % cubist, "-f", "%s" % filestem,
                     "-r", "%d" % nrules, "-u"])
def compute():
    """Compute per-scene Landsat LAI rasters with ``lndlai_compute``.

    For each staged scene, writes a control file pointing at the ENVI
    band files and the trained cubist model, runs the external tool, and
    moves the resulting HDF into a per-path/row folder under the LAI
    directory.  Finally removes the combined training files.
    """
    lndbio = 'lndlai_compute'
    bands = ["blue", "green", "red", "nir", "swir1", "swir2", "cloud"]
    l8bands = ["sr_band2", "sr_band3", "sr_band4", "sr_band5",
               "sr_band6", "sr_band7", "cfmask"]
    landsatFiles = glob.glob(os.path.join(landsatTemp, "*.xml"))
    for landsatFile in landsatFiles:
        sceneID = landsatFile.split(os.sep)[-1][:-4]
        fstem = landsatFile[:-4]
        # Create a per-path/row folder for the LAI output if needed.
        laiPath = os.path.join(landsatLAI, '%s' % sceneID[3:9])
        if not os.path.exists(laiPath):
            os.mkdir(laiPath)
        startDate = '200'
        endDate = '300'
        filestem = os.path.join(landsatLAI,
                                "lndsr_modlai_samples.combined_%s-%s"
                                % (startDate, endDate))
        laiFN = os.path.join(landsatLAI, "lndlai.%s.hdf" % sceneID)
        # BUG FIX: the "%s" placeholder in the control-file name was never
        # substituted, so every scene wrote the literal file
        # "compute_lai%s.inp"; use the scene ID instead.
        fn = os.path.join(landsatLAI, "compute_lai%s.inp" % sceneID)
        with open(fn, "w") as f:
            f.write("LANDSAT_BASE_BLUE = %s_%s.%s.dat\n" % (fstem, l8bands[0], bands[0]))
            f.write("LANDSAT_BASE_GREEN = %s_%s.%s.dat\n" % (fstem, l8bands[1], bands[1]))
            f.write("LANDSAT_BASE_RED = %s_%s.%s.dat\n" % (fstem, l8bands[2], bands[2]))
            f.write("LANDSAT_BASE_NIR = %s_%s.%s.dat\n" % (fstem, l8bands[3], bands[3]))
            f.write("LANDSAT_BASE_SWIR1 = %s_%s.%s.dat\n" % (fstem, l8bands[4], bands[4]))
            f.write("LANDSAT_BASE_SWIR2 = %s_%s.%s.dat\n" % (fstem, l8bands[5], bands[5]))
            f.write("LANDSAT_BASE_CLOUD = %s_%s.%s.dat\n" % (fstem, l8bands[6], bands[6]))
            f.write("LANDSAT_ANC_FILE = %s\n" % filestem)
            f.write("BIOPHYSICS_PARA_FILE_OUT = %s\n" % laiFN)
        subprocess.call(["%s" % lndbio, "%s" % fn])
        shutil.move(laiFN, os.path.join(laiPath, "lndlai.%s.hdf" % sceneID))
        os.remove(fn)
    # =====CLEANING UP========
    filelist = [f for f in os.listdir(landsatLAI)
                if f.startswith("lndsr_modlai_samples")]
    for f in filelist:
        os.remove(os.path.join(landsatLAI, f))
def getLAI():
    """Run the full Landsat LAI pipeline on the staged scenes.

    Steps: convert SR GeoTIFFs to ENVI format, sample MODIS LAI against
    Landsat reflectance, train the cubist model, and compute the LAI
    rasters.
    """
    # Note: the ENVI conversion may print warnings about unknown fields;
    # these are harmless and can be ignored.
    print("Converting Landsat SR to ENVI format...")
    geotiff2envi()
    print("Generating MODIS-Landsat samples...")
    sample()
    print("Computing Landsat LAI...")
    train()
    compute()
def main():
"""Command-line entry point: download data and compute LAI for one point.

Prompts for USGS and NASA EarthData credentials (cached via keyring),
orders/downloads Landsat and MODIS data for the given location and date
range, stages the downloads, runs the LAI pipeline, then hands off to
the external ``processlst`` step.
"""
# Get time and location from user
parser = argparse.ArgumentParser()
parser.add_argument("lat", type=float, help="latitude")
parser.add_argument("lon", type=float, help="longitude")
parser.add_argument("startDate", type=str, help="Start date yyyy-mm-dd")
# NOTE(review): help text below says "Start date" for endDate — looks
# like a copy/paste slip; confirm and fix the user-facing string.
parser.add_argument("endDate", type=str, help="Start date yyyy-mm-dd")
args = parser.parse_args()
loc = [args.lat,args.lon]
startDate = args.startDate
endDate = args.endDate
# set project base directory structure
#41.18,-96.43
# =====USGS credentials===============
# need to get this from pop up
# Prompt for the username; the password is cached in the system keyring
# after the first run so it is only typed once per machine/user.
usgsUser = str(getpass.getpass(prompt="usgs username:"))
if keyring.get_password("usgs",usgsUser)==None:
usgsPass = str(getpass.getpass(prompt="usgs password:"))
keyring.set_password("usgs",usgsUser,usgsPass)
else:
usgsPass = str(keyring.get_password("usgs",usgsUser))
# =====earthData credentials===============
# Same keyring-backed caching pattern for the NASA EarthData login.
earthLoginUser = str(getpass.getpass(prompt="earth login username:"))
if keyring.get_password("nasa",earthLoginUser)==None:
earthLoginPass = str(getpass.getpass(prompt="earth login password:"))
keyring.set_password("nasa",earthLoginUser,earthLoginPass)
else:
earthLoginPass = str(keyring.get_password("nasa",earthLoginUser))
#start Landsat order process
getLandsatData(loc,startDate,endDate,("%s"% usgsUser,"%s"% usgsPass))
# find the MODIS sinusoidal tile that covers the Landsat scene
# MODIS products
product = 'MCD15A3'
version = '005'
[v,h]= latlon2MODtile(args.lat,args.lon)
tiles = "h%02dv%02d" %(h,v)
#tiles = 'h10v04,h10v05'
# download MODIS LAI over the same area and time
print("Downloading MODIS data...")
getMODISlai(tiles,product,version,startDate,endDate,("%s"% earthLoginUser,"%s"% earthLoginPass))
# move surface reflectance downloads into per-scene folders, then
# estimate LAI
downloadFolder = os.path.join(base,'espa_downloads')
folders2move = glob.glob(os.path.join(downloadFolder ,'*'))
for i in range(len(folders2move)):
inputFN = folders2move[i]
# ESPA download folders are named "<sceneID>-..."; chars 3:9 of the
# scene ID are the path/row used as the destination folder name.
sceneID = (inputFN).split(os.sep)[-1].split('-')[0]
scene = sceneID[3:9]
folder = os.path.join(landsatSR,scene)
if not os.path.exists(folder):
os.mkdir(folder)
for filename in glob.glob(os.path.join(inputFN, '*.*')):
shutil.copy(filename, folder)
if len(folders2move)>0:
#======Clean up the download folder once everything is copied========
shutil.rmtree(downloadFolder)
getLAI()
print("All done with LAI")
print("========================================")
print("==============process LST===============")
# Hand off to the external LST processing step with the NASA credentials.
subprocess.call(["processlst","%s" % earthLoginUser,"%s" % earthLoginPass])
#shutil.rmtree(landsatTemp)
if __name__ == "__main__":
    try:
        main()
    except (KeyboardInterrupt, pycurl.error):
        # BUG FIX: the built-in exit() takes a single argument; calling
        # exit('msg', 1) raised a TypeError instead of exiting cleanly.
        # SystemExit with a message prints it and exits with status 1.
        raise SystemExit('Received Ctrl + C... Exiting! Bye.')
| bsd-3-clause |
ml-ensemble/ml-ensemble.github.io | info/source/tutorials_source/parallel.py | 2 | 8200 | # -*- coding: utf-8 -*-
"""
.. _parallel_tutorial:
.. currentmodule: mlens.parallel
Parallel Mechanics
==================
ML-Ensemble is designed to provide an easy user interface. But it is also designed
to be extremely flexible, all the wile providing maximum concurrency at minimal
memory consumption. The lower-level API that builds the ensemble and manages the
computations is constructed in as modular a fashion as possible.
The low-level API introduces a computational graph-like environment that you can
directly exploit to gain further control over your ensemble. In fact, building
your ensemble through the low-level API is almost as straight forward as using the
high-level API. In this tutorial, we will walk through the core
:class:`ParallelProcessing` class.
The purpose of the :class:`ParallelProcessing` class is to provide a streamlined
interface for scheduling and allocating jobs in a nested sequence of tasks. The
typical case is a sequence of :class:`Layer` instances where the output of one layer
becomes the input to the next. While the layers must therefore be fitted sequentially,
each layer should be fitted in parallel. We might be interested in propagating some of the
features from one layer to the next, in which case we need to take care of the array allocation.
ParallelProcessing API
^^^^^^^^^^^^^^^^^^^^^^
Basic map
¨¨¨¨¨¨¨¨¨
In the simplest case, we have a ``caller`` that has a set of ``task``s that needs to be
evaluated in parallel. For instance, the ``caller`` might be a :class:`Learner`, with
each task being a fit job for a given cross-validation fold. In this simple case,
we want to perform an embarrassingly parallel for-loop of each fold, which we can
achieve with the ``map`` method of the :class:`ParallelProcessing` class.
"""
from mlens.parallel import ParallelProcessing, Job, Learner
from mlens.index import FoldIndex
from mlens.utils.dummy import OLS
import numpy as np
# Fixed seed so the printed predictions are reproducible.
np.random.seed(2)
# Toy data: 10 samples with 2 features, random targets.
X = np.arange(20).reshape(10, 2)
y = np.random.rand(10)
# A 2-fold cross-validation indexer and one OLS learner: each fold's fit
# is an independent task the manager can run in parallel.
indexer = FoldIndex(folds=2)
learner = Learner(estimator=OLS(),
                  indexer=indexer,
                  name='ols')
manager = ParallelProcessing(n_jobs=-1)
# Embarrassingly parallel map over the learner's fold-fit tasks; the
# cross-validated predictions are returned.
out = manager.map(learner, 'fit', X, y, return_preds=True)
print(out)
############################################################################
#
# Stacking a set of parallel jobs
# -------------------------------
#
# Suppose instead that we have a sequence of learners, where we want to fit
# each on the errors of the previous learner. We can achieve this by using
# ``stack`` method and a preprocessing pipeline for computing the errors.
# First, we need to construct a preprocessing class to transform the input,
# which will be the preceding learner's predictions, into errors.
from mlens.parallel import Transformer, Pipeline
from mlens.utils.dummy import Scale
from sklearn.base import BaseEstimator, TransformerMixin
def error_scorer(p, y):
    """Return the element-wise absolute error between predictions and targets."""
    residual = p - y
    return np.abs(residual)
class Error(BaseEstimator, TransformerMixin):
"""Transformer that replaces base-learner predictions with their errors.

The ``scorer`` callable maps ``(predictions, targets)`` to errors; the
transformed output feeds the next learner in the stack.
"""
def __init__(self, scorer):
# Store the scoring callable; no other state is kept.
self.scorer = scorer
def fit(self, X, y):
# Stateless transformer: nothing to learn.
return self
def transform(self, X, y):
# X holds the previous learner's predictions; emit (errors, y) so the
# targets are forwarded alongside the new features.
return self.scorer(X, y), y
############################################################################
# Now, we construct a sequence of tasks to compute, where the output of one
# task will be the input to the next. Hence, we want a sequence of the form
# ``[learner, transformer, ..., learner]``:
# Build the alternating task sequence
# [ols-1, sc-2, ols-2, sc-3, ols-3]: each transformer (from the second
# round on) converts the previous learner's predictions into errors, and
# the following learner is fitted on those errors via its `preprocess`
# reference.
tasks = []
for i in range(3):
if i != 0:
# return_y=True forwards the targets through the pipeline.
pipeline = Pipeline([('err', Error(error_scorer))], return_y=True)
transformer = Transformer(
        estimator=pipeline,
        indexer=indexer,
        name='sc-%i' % (i + 1)
)
tasks.append(transformer)
# The first learner takes the raw input (no preprocessing); later
# learners point at their error transformer by name.
learner = Learner(
    estimator=OLS(),
    preprocess='sc-%i' % (i+1) if i != 0 else None,
    indexer=indexer,
    name='ols-%i' % (i + 1)
)
tasks.append(learner)
############################################################################
# To fit the stack, we call the ``stack`` method on the ``manager``, and since
# each learner must have access to their transformer, we set ``split=False``
# (otherwise each task will have a separate sub-cache, sealing them off
# from each other).
# split=False keeps all tasks in one shared cache so each learner can
# read the output of its preprocessing transformer.
out = manager.stack(
    tasks, 'fit', X, y, return_preds=True, split=False)
print(out)
############################################################################
# If we instead want to append these errors as features, we can simply
# alter our transformer to concatenate the errors to the original data.
# Alternatively, we can automate the process by instead using the
# :class:`mlens.ensemble.Sequential` API.
############################################################################
#
# Manual initialization and processing
# ------------------------------------
#
# Under the hood, both ``map`` and ``stack`` first call ``initialize`` on the
# ``manager``, followed by a call to ``process`` with some default arguments.
# For maximum control, we can manually do the initialization and processing step.
# When we initialize, an instance of :class:`Job` is created that collect arguments
# relevant for of the job as well as handles for data to be used. For instance,
# we can specify that we want the predictions of all layers, as opposed to just the
# final layer:
# Manual two-step run: first create the Job (allocating memory and
# recording which layers' predictions to return)...
out = manager.initialize(
    'fit', X, y, None, return_preds=['ols-1', 'ols-3'], stack=True, split=False)
############################################################################
# The ``initialize`` method primarily allocates memory of input data and
# puts it on the ``job`` instance. Note that if the input is a string pointing
# to data on disk, ``initialize`` will attempt to load the data into memory.
# If the backend of the manager is ``threading``, keeping the data on the parent
# process is sufficient for workers to reach it. With ``multiprocessing`` as
# the backend, data will be memory-mapped to avoid serialization.
############################################################################
# The ``initialize`` method returns an ``out`` dictionary that specifies
# what type of output we want when running the manager on the assigned job.
# To run the manager, we call ``process`` with our ``out`` pointer:
# ...then execute the task stack against that job description.
out = manager.process(tasks, out)
print(out)
############################################################################
# The output now is a list of arrays, the first contains the same predictions
# as we got in the ``map`` call, the last is the equivalent to the predicitons
# we got in the ``stack`` call. Note that this functionality is available
# also in the ``stack`` and ``map`` calls.
############################################################################
#
# Memory management
# -----------------
#
# When running the manager, it will read and write to memory buffers. This is
# less of a concern when the ``threading`` backend is used, as data is kept
# in the parent process. But when data is loaded from file path, or when
# ``multiprocessing`` is used, we want to clean up after us. Thus, when we
# are through with the ``manager``, it is important to call the ``clear``
# method. This will however destroy any ephemeral data stored on the instance.
# Removes the processing cache (and any ephemeral data on the instance).
manager.clear()
############################################################################
#
# ..warning:: The ``clear`` method will remove any files in the specified path.
# If the path specified in the ``initialize`` call includes files other than
# those generated in the ``process`` call, these will ALSO be removed.
# ALWAYS use a clean temporary cache for processing jobs.
############################################################################
# To minimize the risk of forgetting this last step, the :class:`ParallelProcessing`
# class can be used as context manager, automatically cleaning up the cache
# when exiting the context:
learner = Learner(estimator=OLS(), indexer=indexer)

# BUG FIX: the context variable was misspelled ``mananger``, so the body
# silently kept using the earlier module-level manager and the
# context-managed instance (whose cache is auto-cleaned on exit) was
# never actually exercised.
with ParallelProcessing() as manager:
    manager.stack(learner, 'fit', X, y, split=False)
    out = manager.stack(learner, 'predict', X, split=False)
| mit |
psav/cfme_tests | cfme/utils/smem_memory_monitor.py | 1 | 67380 | """Monitor Memory on a CFME/Miq appliance and builds report&graphs displaying usage per process."""
import json
import time
import traceback
from collections import OrderedDict
from datetime import datetime
from threading import Thread
import os
import six
import yaml
from cycler import cycler
from yaycl import AttrDict
from cfme.utils.conf import cfme_performance
from cfme.utils.log import logger
from cfme.utils.path import results_path
from cfme.utils.version import current_version
from cfme.utils.version import get_version
# MiqWorker class names tracked per-PID by SmemMemoryMonitor. Entries with a
# "# 5.4" suffix are the pre-namespaced names used by 5.4 appliances; both old
# and new spellings are kept so one list covers every supported version.
miq_workers = [
    'MiqGenericWorker',
    'MiqPriorityWorker',
    'MiqScheduleWorker',
    'MiqUiWorker',
    'MiqWebServiceWorker',
    'MiqWebsocketWorker',
    'MiqReportingWorker',
    'MiqReplicationWorker',
    'MiqSmartProxyWorker',
    'MiqVimBrokerWorker',
    'MiqEmsRefreshCoreWorker',
    # Refresh Workers:
    'ManageIQ::Providers::Microsoft::InfraManager::RefreshWorker',
    'ManageIQ::Providers::Openstack::InfraManager::RefreshWorker',
    'ManageIQ::Providers::Redhat::InfraManager::RefreshWorker',
    'ManageIQ::Providers::Vmware::InfraManager::RefreshWorker',
    'MiqEmsRefreshWorkerMicrosoft',  # 5.4
    'MiqEmsRefreshWorkerRedhat',  # 5.4
    'MiqEmsRefreshWorkerVmware',  # 5.4
    'ManageIQ::Providers::Amazon::CloudManager::RefreshWorker',
    'ManageIQ::Providers::Azure::CloudManager::RefreshWorker',
    'ManageIQ::Providers::Google::CloudManager::RefreshWorker',
    'ManageIQ::Providers::Openstack::CloudManager::RefreshWorker',
    'MiqEmsRefreshWorkerAmazon',  # 5.4
    'MiqEmsRefreshWorkerOpenstack',  # 5.4
    'ManageIQ::Providers::AnsibleTower::ConfigurationManager::RefreshWorker',
    'ManageIQ::Providers::Foreman::ConfigurationManager::RefreshWorker',
    'ManageIQ::Providers::Foreman::ProvisioningManager::RefreshWorker',
    'MiqEmsRefreshWorkerForemanConfiguration',  # 5.4
    'MiqEmsRefreshWorkerForemanProvisioning',  # 5.4
    'ManageIQ::Providers::Atomic::ContainerManager::RefreshWorker',
    'ManageIQ::Providers::AtomicEnterprise::ContainerManager::RefreshWorker',
    'ManageIQ::Providers::Kubernetes::ContainerManager::RefreshWorker',
    'ManageIQ::Providers::Openshift::ContainerManager::RefreshWorker',
    'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::RefreshWorker',
    'ManageIQ::Providers::StorageManager::CinderManager::RefreshWorker',
    'ManageIQ::Providers::StorageManager::SwiftManager::RefreshWorker',
    'ManageIQ::Providers::Amazon::NetworkManager::RefreshWorker',
    'ManageIQ::Providers::Azure::NetworkManager::RefreshWorker',
    'ManageIQ::Providers::Google::NetworkManager::RefreshWorker',
    'ManageIQ::Providers::Openstack::NetworkManager::RefreshWorker',
    'MiqNetappRefreshWorker',
    'MiqSmisRefreshWorker',
    # Event Workers:
    'MiqEventHandler',
    'ManageIQ::Providers::Openstack::InfraManager::EventCatcher',
    'ManageIQ::Providers::StorageManager::CinderManager::EventCatcher',
    'ManageIQ::Providers::Redhat::InfraManager::EventCatcher',
    'ManageIQ::Providers::Vmware::InfraManager::EventCatcher',
    'MiqEventCatcherRedhat',  # 5.4
    'MiqEventCatcherVmware',  # 5.4
    'ManageIQ::Providers::Amazon::CloudManager::EventCatcher',
    'ManageIQ::Providers::Azure::CloudManager::EventCatcher',
    'ManageIQ::Providers::Google::CloudManager::EventCatcher',
    'ManageIQ::Providers::Openstack::CloudManager::EventCatcher',
    'MiqEventCatcherAmazon',  # 5.4
    'MiqEventCatcherOpenstack',  # 5.4
    'ManageIQ::Providers::Atomic::ContainerManager::EventCatcher',
    'ManageIQ::Providers::AtomicEnterprise::ContainerManager::EventCatcher',
    'ManageIQ::Providers::Kubernetes::ContainerManager::EventCatcher',
    'ManageIQ::Providers::Openshift::ContainerManager::EventCatcher',
    'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::EventCatcher',
    'ManageIQ::Providers::Openstack::NetworkManager::EventCatcher',
    # Metrics Processor/Collector Workers
    'MiqEmsMetricsProcessorWorker',
    'ManageIQ::Providers::Openstack::InfraManager::MetricsCollectorWorker',
    'ManageIQ::Providers::Redhat::InfraManager::MetricsCollectorWorker',
    'ManageIQ::Providers::Vmware::InfraManager::MetricsCollectorWorker',
    'MiqEmsMetricsCollectorWorkerRedhat',  # 5.4
    'MiqEmsMetricsCollectorWorkerVmware',  # 5.4
    'ManageIQ::Providers::Amazon::CloudManager::MetricsCollectorWorker',
    'ManageIQ::Providers::Azure::CloudManager::MetricsCollectorWorker',
    'ManageIQ::Providers::Openstack::CloudManager::MetricsCollectorWorker',
    'MiqEmsMetricsCollectorWorkerAmazon',  # 5.4
    'MiqEmsMetricsCollectorWorkerOpenstack',  # 5.4
    'ManageIQ::Providers::Atomic::ContainerManager::MetricsCollectorWorker',
    'ManageIQ::Providers::AtomicEnterprise::ContainerManager::MetricsCollectorWorker',
    'ManageIQ::Providers::Kubernetes::ContainerManager::MetricsCollectorWorker',
    'ManageIQ::Providers::Openshift::ContainerManager::MetricsCollectorWorker',
    'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::MetricsCollectorWorker',
    'ManageIQ::Providers::Openstack::NetworkManager::MetricsCollectorWorker',
    'MiqStorageMetricsCollectorWorker',
    'MiqVmdbStorageBridgeWorker']
# All tracked ruby processes: the workers above plus the server, watchdog,
# console and replication scripts.
ruby_processes = list(miq_workers)
ruby_processes.extend(['evm:dbsync:replicate', 'MIQ Server (evm_server.rb)', 'evm_watchdog.rb',
                       'appliance_console.rb'])
# Display/report ordering for per-process tables and graphs (order matters).
process_order = list(ruby_processes)
process_order.extend(['memcached', 'postgres', 'httpd', 'collectd'])
# Timestamp created at first import, thus grouping all reports of like workload
test_ts = time.strftime('%Y%m%d%H%M%S')
# 10s sample interval (occasionally sampling can take almost 4s on an appliance doing a lot of work)
SAMPLE_INTERVAL = 10
class SmemMemoryMonitor(Thread):
    """Background thread sampling appliance-wide and per-process memory.

    Roughly every ``SAMPLE_INTERVAL`` seconds the thread reads
    ``/proc/meminfo`` and ``smem`` output over the supplied ssh client,
    accumulating samples until ``signal`` is set to ``False`` by the owner,
    at which point the full report is written via :func:`create_report`.
    """

    def __init__(self, ssh_client, scenario_data):
        super(SmemMemoryMonitor, self).__init__()
        self.ssh_client = ssh_client
        self.scenario_data = scenario_data
        self.grafana_urls = {}
        self.miq_server_id = ''
        # Set True when /proc/meminfo exposes MemAvailable (RHEL7-based, 5.5+);
        # "used" memory then subtracts Slab rather than Buffers.
        self.use_slab = False
        # Cleared externally to terminate the monitoring loop.
        self.signal = True

    def create_process_result(self, process_results, starttime, process_pid, process_name,
                              memory_by_pid):
        """Record one memory sample for ``process_pid`` under ``process_name``.

        Consumes (deletes) the pid's entry from ``memory_by_pid`` so the caller
        can tell which pids are still unaccounted for after classification.
        """
        if process_pid in memory_by_pid:
            if process_name not in process_results:
                process_results[process_name] = OrderedDict()
                process_results[process_name][process_pid] = OrderedDict()
            if process_pid not in process_results[process_name]:
                process_results[process_name][process_pid] = OrderedDict()
            process_results[process_name][process_pid][starttime] = {}
            sample = memory_by_pid[process_pid]
            for measurement in ('rss', 'pss', 'uss', 'vss', 'swap'):
                process_results[process_name][process_pid][starttime][measurement] = \
                    sample[measurement]
            del memory_by_pid[process_pid]
        else:
            logger.warn('Process {} PID, not found: {}'.format(process_name, process_pid))

    def get_appliance_memory(self, appliance_results, plottime):
        """Sample appliance-wide memory from /proc/meminfo into ``appliance_results``.

        Used memory is derived as:
          5.5/5.6 - RHEL 7 / Centos 7: MemTotal - (MemFree + Slab + Cached)
          5.4     - RHEL 6 / Centos 6: MemTotal - (MemFree + Buffers + Cached)
        (Available memory could potentially be a better metric.)
        On a failed ssh command the (empty) entry for ``plottime`` is removed.
        """
        appliance_results[plottime] = {}
        result = self.ssh_client.run_command('cat /proc/meminfo')
        if result.failed:
            logger.error('Exit_status nonzero in get_appliance_memory: {}, {}'
                .format(result.rc, result.output))
            del appliance_results[plottime]
        else:
            meminfo_raw = result.output.replace('kB', '').strip()
            meminfo = OrderedDict((k.strip(), v.strip()) for k, v in
                (value.strip().split(':') for value in meminfo_raw.split('\n')))
            # All values below are converted from KiB to MiB.
            appliance_results[plottime]['total'] = float(meminfo['MemTotal']) / 1024
            appliance_results[plottime]['free'] = float(meminfo['MemFree']) / 1024
            if 'MemAvailable' in meminfo:  # 5.5, RHEL 7/Centos 7
                self.use_slab = True
                mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float(
                    meminfo['Slab']) + float(meminfo['Cached']))) / 1024
            else:  # 5.4, RHEL 6/Centos 6
                mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float(
                    meminfo['Buffers']) + float(meminfo['Cached']))) / 1024
            appliance_results[plottime]['used'] = mem_used
            appliance_results[plottime]['buffers'] = float(meminfo['Buffers']) / 1024
            appliance_results[plottime]['cached'] = float(meminfo['Cached']) / 1024
            appliance_results[plottime]['slab'] = float(meminfo['Slab']) / 1024
            appliance_results[plottime]['swap_total'] = float(meminfo['SwapTotal']) / 1024
            appliance_results[plottime]['swap_free'] = float(meminfo['SwapFree']) / 1024

    def get_evm_workers(self):
        """Return {pid: worker_type} for workers owned by this miq server.

        Queries the vmdb directly via psql; returns an empty dict when the
        query produces no rows.
        """
        result = self.ssh_client.run_command(
            'psql -t -q -d vmdb_production -c '
            '\"select pid,type from miq_workers where miq_server_id = \'{}\'\"'.format(
                self.miq_server_id))
        if result.output.strip():
            workers = {}
            for worker in result.output.strip().split('\n'):
                pid_worker = worker.strip().split('|')
                if len(pid_worker) == 2:
                    workers[pid_worker[0].strip()] = pid_worker[1].strip()
                else:
                    logger.error('Unexpected output from psql: {}'.format(worker))
            return workers
        else:
            return {}

    def get_miq_server_id(self):
        """Resolve and store this appliance's miq_servers.id from its GUID."""
        # Obtain the Miq Server GUID:
        result = self.ssh_client.run_command('cat /var/www/miq/vmdb/GUID')
        logger.info('Obtained appliance GUID: {}'.format(result.output.strip()))
        # Get server id:
        result = self.ssh_client.run_command(
            'psql -t -q -d vmdb_production -c "select id from miq_servers where guid = \'{}\'"'
            ''.format(result.output.strip()))
        logger.info('Obtained miq_server_id: {}'.format(result.output.strip()))
        self.miq_server_id = result.output.strip()

    def get_pids_memory(self):
        """Return {pid: measurements} parsed from ``smem`` output (KiB -> MiB).

        Lines that cannot be parsed are logged and skipped rather than
        aborting the whole sample.
        """
        result = self.ssh_client.run_command(
            'smem -c \'pid rss pss uss vss swap name command\' | sed 1d')
        pids_memory = result.output.strip().split('\n')
        memory_by_pid = {}
        for line in pids_memory:
            if line.strip():
                # Initialize pid so the except handler below never logs a
                # stale value from a previous iteration (or raises NameError).
                pid = None
                try:
                    values = [s for s in line.strip().split(' ') if s]
                    pid = values[0]
                    int(pid)  # raises ValueError when the first column is not a pid
                    memory_by_pid[pid] = {}
                    memory_by_pid[pid]['rss'] = float(values[1]) / 1024
                    memory_by_pid[pid]['pss'] = float(values[2]) / 1024
                    memory_by_pid[pid]['uss'] = float(values[3]) / 1024
                    memory_by_pid[pid]['vss'] = float(values[4]) / 1024
                    memory_by_pid[pid]['swap'] = float(values[5]) / 1024
                    memory_by_pid[pid]['name'] = values[6]
                    memory_by_pid[pid]['cmd'] = ' '.join(values[7:])
                except Exception as e:
                    # The original format string had one placeholder for two
                    # arguments and silently dropped the exception text.
                    logger.error('Processing smem output error: {}: {}'.format(
                        e.__class__.__name__, e))
                    logger.error('Issue with pid: {} line: {}'.format(pid, line))
                    logger.error('Complete smem output: {}'.format(result.output))
        return memory_by_pid

    def _real_run(self):
        """ Result dictionaries:
        appliance_results[timestamp][measurement] = value
        appliance_results[timestamp]['total'] = value
        appliance_results[timestamp]['free'] = value
        appliance_results[timestamp]['used'] = value
        appliance_results[timestamp]['buffers'] = value
        appliance_results[timestamp]['cached'] = value
        appliance_results[timestamp]['slab'] = value
        appliance_results[timestamp]['swap_total'] = value
        appliance_results[timestamp]['swap_free'] = value
        appliance measurements: total/free/used/buffers/cached/slab/swap_total/swap_free
        process_results[name][pid][timestamp][measurement] = value
        process_results[name][pid][timestamp]['rss'] = value
        process_results[name][pid][timestamp]['pss'] = value
        process_results[name][pid][timestamp]['uss'] = value
        process_results[name][pid][timestamp]['vss'] = value
        process_results[name][pid][timestamp]['swap'] = value
        """
        appliance_results = OrderedDict()
        process_results = OrderedDict()
        install_smem(self.ssh_client)
        self.get_miq_server_id()
        logger.info('Starting Monitoring Thread.')
        while self.signal:
            starttime = time.time()
            plottime = datetime.now()
            self.get_appliance_memory(appliance_results, plottime)
            workers = self.get_evm_workers()
            memory_by_pid = self.get_pids_memory()
            # Known miq workers first (classified by pid from the vmdb query):
            for worker_pid in workers:
                self.create_process_result(process_results, plottime, worker_pid,
                    workers[worker_pid], memory_by_pid)
            # Remaining pids are classified by process name / command line:
            for pid in sorted(memory_by_pid.keys()):
                if memory_by_pid[pid]['name'] == 'httpd':
                    self.create_process_result(process_results, plottime, pid, 'httpd',
                        memory_by_pid)
                elif memory_by_pid[pid]['name'] == 'postgres':
                    self.create_process_result(process_results, plottime, pid, 'postgres',
                        memory_by_pid)
                elif memory_by_pid[pid]['name'] == 'postmaster':
                    # postmaster is the postgres parent process; report it
                    # under the same 'postgres' bucket.
                    self.create_process_result(process_results, plottime, pid, 'postgres',
                        memory_by_pid)
                elif memory_by_pid[pid]['name'] == 'memcached':
                    self.create_process_result(process_results, plottime, pid, 'memcached',
                        memory_by_pid)
                elif memory_by_pid[pid]['name'] == 'collectd':
                    self.create_process_result(process_results, plottime, pid, 'collectd',
                        memory_by_pid)
                elif memory_by_pid[pid]['name'] == 'ruby':
                    if 'evm_server.rb' in memory_by_pid[pid]['cmd']:
                        self.create_process_result(process_results, plottime, pid,
                            'MIQ Server (evm_server.rb)', memory_by_pid)
                    elif 'MIQ Server' in memory_by_pid[pid]['cmd']:
                        self.create_process_result(process_results, plottime, pid,
                            'MIQ Server (evm_server.rb)', memory_by_pid)
                    elif 'evm_watchdog.rb' in memory_by_pid[pid]['cmd']:
                        self.create_process_result(process_results, plottime, pid,
                            'evm_watchdog.rb', memory_by_pid)
                    elif 'appliance_console.rb' in memory_by_pid[pid]['cmd']:
                        self.create_process_result(process_results, plottime, pid,
                            'appliance_console.rb', memory_by_pid)
                    elif 'evm:dbsync:replicate' in memory_by_pid[pid]['cmd']:
                        self.create_process_result(process_results, plottime, pid,
                            'evm:dbsync:replicate', memory_by_pid)
                    else:
                        logger.debug('Unaccounted for ruby pid: {}'.format(pid))
            timediff = time.time() - starttime
            logger.debug('Monitoring sampled in {}s'.format(round(timediff, 4)))
            # Sleep the remainder of the monitoring interval (roughly 10s
            # samples). If sampling overran the interval, don't sleep at all;
            # the previous abs() slept an arbitrary amount in that case.
            time_to_sleep = max(SAMPLE_INTERVAL - timediff, 0)
            time.sleep(time_to_sleep)
        logger.info('Monitoring CFME Memory Terminating')
        create_report(self.scenario_data, appliance_results, process_results, self.use_slab,
            self.grafana_urls)

    def run(self):
        # Wrap the real loop so an unexpected error is logged with a full
        # traceback instead of dying silently inside the thread.
        try:
            self._real_run()
        except Exception as e:
            logger.error('Error in Monitoring Thread: {}'.format(e))
            logger.error('{}'.format(traceback.format_exc()))
def install_smem(ssh_client):
    """Install and patch the ``smem`` memory profiler on the appliance.

    smem is included by default on 5.6 appliances; on 5.5 the EPEL7 rpm is
    installed first so yum can resolve the package.  The binary is then
    patched so long worker command lines are not truncated in its output.

    Args:
        ssh_client: connected ssh client used to run commands on the appliance.
    """
    logger.info('Installing smem.')
    ver = get_version()
    if ver == '55':
        ssh_client.run_command('rpm -i {}'.format(cfme_performance['tools']['rpms']['epel7_rpm']))
    ssh_client.run_command('yum install -y smem')
    # Patch smem to display longer command line names (the default %.27s
    # truncates far too short for namespaced MiqWorker class names).  Raw
    # string avoids the invalid '\.' escape of the original literal.
    logger.info('Patching smem')
    ssh_client.run_command(r'sed -i s/\.27s/\.200s/g /usr/bin/smem')
def create_report(scenario_data, appliance_results, process_results, use_slab, grafana_urls):
    """Build the full memory report tree (graphs, raw CSVs, summary HTML) for one scenario run."""
    logger.info('Creating Memory Monitoring Report.')
    version = current_version()
    providers = 'No Providers'
    if 'providers' in scenario_data['scenario']:
        providers = ', '.join(scenario_data['scenario']['providers'])
    workload_dir = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], version))
    if not os.path.exists(str(workload_dir)):
        os.makedirs(str(workload_dir))
    scenario_dir = workload_dir.join(scenario_data['scenario']['name'])
    if os.path.exists(str(scenario_dir)):
        # Same workload+scenario already reported this run: disambiguate with a timestamp.
        logger.warn('Duplicate Workload-Scenario Name: {}'.format(scenario_dir))
        scenario_dir = workload_dir.join('{}-{}'.format(time.strftime('%Y%m%d%H%M%S'),
                                                        scenario_data['scenario']['name']))
        logger.warn('Using: {}'.format(scenario_dir))
    os.mkdir(str(scenario_dir))
    graphs_dir = scenario_dir.join('graphs')
    if not os.path.exists(str(graphs_dir)):
        os.mkdir(str(graphs_dir))
    rawdata_dir = scenario_dir.join('rawdata')
    if not os.path.exists(str(rawdata_dir)):
        os.mkdir(str(rawdata_dir))
    # Graphs first, then raw/summary data files:
    graph_appliance_measurements(graphs_dir, version, appliance_results, use_slab, providers)
    graph_individual_process_measurements(graphs_dir, process_results, providers)
    graph_same_miq_workers(graphs_dir, process_results, providers)
    graph_all_miq_workers(graphs_dir, process_results, providers)
    # Dump scenario Yaml:
    with open(str(scenario_dir.join('scenario.yml')), 'w') as scenario_file:
        yaml.dump(dict(scenario_data['scenario']), scenario_file, default_flow_style=False)
    generate_summary_csv(scenario_dir.join('{}-summary.csv'.format(version)), appliance_results,
                         process_results, providers, version)
    generate_raw_data_csv(rawdata_dir, appliance_results, process_results)
    generate_summary_html(scenario_dir, version, appliance_results, process_results, scenario_data,
                          providers, grafana_urls)
    generate_workload_html(scenario_dir, version, scenario_data, providers, grafana_urls)
    logger.info('Finished Creating Report')
def compile_per_process_results(procs_to_compile, process_results, ts_end):
    """Aggregate end-of-run memory totals across a group of process names.

    A PID that has a sample at ``ts_end`` counts as alive and contributes its
    measurements to the totals; a PID with no sample at ``ts_end`` was
    recycled before the run ended.

    Returns:
        7-tuple of (alive_pids, recycled_pids, total rss, pss, uss, vss, swap).
    """
    alive = recycled = 0
    totals = {'rss': 0, 'pss': 0, 'uss': 0, 'vss': 0, 'swap': 0}
    for proc_name in procs_to_compile:
        for samples in process_results.get(proc_name, {}).values():
            final = samples.get(ts_end)
            if final is None:
                recycled += 1
            else:
                alive += 1
                for measurement in totals:
                    totals[measurement] += final[measurement]
    return (alive, recycled, totals['rss'], totals['pss'], totals['uss'],
            totals['vss'], totals['swap'])
def generate_raw_data_csv(directory, appliance_results, process_results):
    """Dump every appliance-wide and per-PID memory sample as CSV files in *directory*."""
    t_start = time.time()
    appliance_csv = str(directory.join('appliance.csv'))
    with open(appliance_csv, 'w') as out:
        out.write('TimeStamp,Total,Free,Used,Buffers,Cached,Slab,Swap_Total,Swap_Free\n')
        for ts in appliance_results:
            sample = appliance_results[ts]
            out.write('{},{},{},{},{},{},{},{},{}\n'.format(ts,
                sample['total'], sample['free'], sample['used'], sample['buffers'],
                sample['cached'], sample['slab'], sample['swap_total'], sample['swap_free']))
    # One CSV per tracked pid, named "<pid>-<process name>.csv":
    for proc_name in process_results:
        for pid in process_results[proc_name]:
            proc_csv = str(directory.join('{}-{}.csv'.format(pid, proc_name)))
            with open(proc_csv, 'w') as out:
                out.write('TimeStamp,RSS,PSS,USS,VSS,SWAP\n')
                for ts in process_results[proc_name][pid]:
                    sample = process_results[proc_name][pid][ts]
                    out.write('{},{},{},{},{},{}\n'.format(ts,
                        sample['rss'], sample['pss'], sample['uss'],
                        sample['vss'], sample['swap']))
    logger.info('Generated Raw Data CSVs in: {}'.format(time.time() - t_start))
def generate_summary_csv(file_name, appliance_results, process_results, provider_names,
        version_string):
    """Write the start-of-test vs end-of-test summary CSV for a scenario run.

    Args:
        file_name: target csv path (anything str()-able, e.g. py.path).
        appliance_results: OrderedDict of appliance-wide samples keyed by
            timestamp; its first and last keys bound the test window.
        process_results: per process/pid samples (see SmemMemoryMonitor).
        provider_names: comma-separated provider list for the header line.
        version_string: appliance version for the header line.
    """
    starttime = time.time()
    with open(str(file_name), 'w') as csv_file:
        csv_file.write('Version: {}, Provider(s): {}\n'.format(version_string, provider_names))
        csv_file.write('Measurement,Start of test,End of test\n')
        # list() works on both py2 and py3; the original .keys()[0] indexing
        # is Python-2-only (dict views are not indexable on py3).
        start = list(appliance_results)[0]
        end = list(appliance_results)[-1]
        csv_file.write('Appliance Total Memory,{},{}\n'.format(
            round(appliance_results[start]['total'], 2), round(appliance_results[end]['total'], 2)))
        csv_file.write('Appliance Free Memory,{},{}\n'.format(
            round(appliance_results[start]['free'], 2), round(appliance_results[end]['free'], 2)))
        csv_file.write('Appliance Used Memory,{},{}\n'.format(
            round(appliance_results[start]['used'], 2), round(appliance_results[end]['used'], 2)))
        csv_file.write('Appliance Buffers,{},{}\n'.format(
            round(appliance_results[start]['buffers'], 2),
            round(appliance_results[end]['buffers'], 2)))
        csv_file.write('Appliance Cached,{},{}\n'.format(
            round(appliance_results[start]['cached'], 2),
            round(appliance_results[end]['cached'], 2)))
        csv_file.write('Appliance Slab,{},{}\n'.format(
            round(appliance_results[start]['slab'], 2),
            round(appliance_results[end]['slab'], 2)))
        csv_file.write('Appliance Total Swap,{},{}\n'.format(
            round(appliance_results[start]['swap_total'], 2),
            round(appliance_results[end]['swap_total'], 2)))
        csv_file.write('Appliance Free Swap,{},{}\n'.format(
            round(appliance_results[start]['swap_free'], 2),
            round(appliance_results[end]['swap_free'], 2)))
        # Per-process start/end rows for each measurement kind:
        summary_csv_measurement_dump(csv_file, process_results, 'rss')
        summary_csv_measurement_dump(csv_file, process_results, 'pss')
        summary_csv_measurement_dump(csv_file, process_results, 'uss')
        summary_csv_measurement_dump(csv_file, process_results, 'vss')
        summary_csv_measurement_dump(csv_file, process_results, 'swap')
    timediff = time.time() - starttime
    logger.info('Generated Summary CSV in: {}'.format(timediff))
def generate_summary_html(directory, version_string, appliance_results, process_results,
        scenario_data, provider_names, grafana_urls):
    """Write the scenario's ``index.html`` memory summary report.

    Emits, in order: appliance-wide totals, CFME/Miq worker totals, per
    process-group totals (ruby/memcached/postgres/httpd/collectd), the
    appliance memory/swap graphs, a per-pid results table, and per-worker
    graph images.  Expects results keyed by ``datetime`` timestamps in
    insertion (OrderedDict) order as produced by ``SmemMemoryMonitor``.

    NOTE(review): relies on Python 2 semantics (``dict.keys()[0]`` indexing
    and ``unicode()``); not py3-portable as written.
    """
    starttime = time.time()
    file_name = str(directory.join('index.html'))
    with open(file_name, 'w') as html_file:
        html_file.write('<html>\n')
        html_file.write('<head><title>{} - {} Memory Usage Performance</title></head>'.format(
            version_string, provider_names))
        html_file.write('<body>\n')
        html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(version_string,
            scenario_data['test_name'].title()))
        html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format(
            scenario_data['appliance_roles'].replace(',', ', ')))
        html_file.write('<b>Provider(s):</b> {}<br>\n'.format(provider_names))
        html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format(
            scenario_data['appliance_ip'], scenario_data['appliance_name']))
        if grafana_urls:
            for g_name in sorted(grafana_urls.keys()):
                html_file.write(
                    ' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name],
                        g_name))
        html_file.write('<br>\n')
        html_file.write('<b><a href=\'{}-summary.csv\'>Summary CSV</a></b>'.format(version_string))
        html_file.write(' : <b><a href=\'workload.html\'>Workload Info</a></b>')
        html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n')
        html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n')
        # First/last sample timestamps bound the test window (py2-only indexing).
        start = appliance_results.keys()[0]
        end = appliance_results.keys()[-1]
        timediff = end - start
        total_proc_count = 0
        for proc_name in process_results:
            total_proc_count += len(process_results[proc_name].keys())
        growth = appliance_results[end]['used'] - appliance_results[start]['used']
        max_used_memory = 0
        for ts in appliance_results:
            if appliance_results[ts]['used'] > max_used_memory:
                max_used_memory = appliance_results[ts]['used']
        html_file.write('<table border="1">\n')
        html_file.write('<tr><td>\n')
        # Appliance Wide Results
        html_file.write('<table style="width:100%" border="1">\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b>Version</b></td>\n')
        html_file.write('<td><b>Start Time</b></td>\n')
        html_file.write('<td><b>End Time</b></td>\n')
        html_file.write('<td><b>Total Test Time</b></td>\n')
        html_file.write('<td><b>Total Memory</b></td>\n')
        html_file.write('<td><b>Start Used Memory</b></td>\n')
        html_file.write('<td><b>End Used Memory</b></td>\n')
        html_file.write('<td><b>Used Memory Growth</b></td>\n')
        html_file.write('<td><b>Max Used Memory</b></td>\n')
        html_file.write('<td><b>Total Tracked Processes</b></td>\n')
        html_file.write('</tr>\n')
        html_file.write('<td><a href=\'rawdata/appliance.csv\'>{}</a></td>\n'.format(
            version_string))
        # microsecond=0 strips sub-second noise from the displayed timestamps.
        html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0)))
        html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0)))
        html_file.write('<td>{}</td>\n'.format(unicode(timediff).partition('.')[0]))
        html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['total'], 2)))
        html_file.write('<td>{}</td>\n'.format(round(appliance_results[start]['used'], 2)))
        html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['used'], 2)))
        html_file.write('<td>{}</td>\n'.format(round(growth, 2)))
        html_file.write('<td>{}</td>\n'.format(round(max_used_memory, 2)))
        html_file.write('<td>{}</td>\n'.format(total_proc_count))
        html_file.write('</table>\n')
        # CFME/Miq Worker Results
        html_file.write('<table style="width:100%" border="1">\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b>Total CFME/Miq Workers</b></td>\n')
        html_file.write('<td><b>End Running Workers</b></td>\n')
        html_file.write('<td><b>Recycled Workers</b></td>\n')
        html_file.write('<td><b>End Total Worker RSS</b></td>\n')
        html_file.write('<td><b>End Total Worker PSS</b></td>\n')
        html_file.write('<td><b>End Total Worker USS</b></td>\n')
        html_file.write('<td><b>End Total Worker VSS</b></td>\n')
        html_file.write('<td><b>End Total Worker SWAP</b></td>\n')
        html_file.write('</tr>\n')
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
            miq_workers, process_results, end)
        html_file.write('<tr>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        html_file.write('</table>\n')
        # Per Process Summaries: one row per group, plus a grand total row
        # accumulated in the t_a_pids/t_r_pids/tt_* variables.
        html_file.write('<table style="width:100%" border="1">\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b>Application/Process Group</b></td>\n')
        html_file.write('<td><b>Total Processes</b></td>\n')
        html_file.write('<td><b>End Running Processes</b></td>\n')
        html_file.write('<td><b>Recycled Processes</b></td>\n')
        html_file.write('<td><b>End Total Process RSS</b></td>\n')
        html_file.write('<td><b>End Total Process PSS</b></td>\n')
        html_file.write('<td><b>End Total Process USS</b></td>\n')
        html_file.write('<td><b>End Total Process VSS</b></td>\n')
        html_file.write('<td><b>End Total Process SWAP</b></td>\n')
        html_file.write('</tr>\n')
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
            ruby_processes, process_results, end)
        t_a_pids = a_pids
        t_r_pids = r_pids
        tt_rss = t_rss
        tt_pss = t_pss
        tt_uss = t_uss
        tt_vss = t_vss
        tt_swap = t_swap
        html_file.write('<tr>\n')
        html_file.write('<td>ruby</td>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        # memcached Summary
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
            ['memcached'], process_results, end)
        t_a_pids += a_pids
        t_r_pids += r_pids
        tt_rss += t_rss
        tt_pss += t_pss
        tt_uss += t_uss
        tt_vss += t_vss
        tt_swap += t_swap
        html_file.write('<tr>\n')
        html_file.write('<td>memcached</td>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        # Postgres Summary
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
            ['postgres'], process_results, end)
        t_a_pids += a_pids
        t_r_pids += r_pids
        tt_rss += t_rss
        tt_pss += t_pss
        tt_uss += t_uss
        tt_vss += t_vss
        tt_swap += t_swap
        html_file.write('<tr>\n')
        html_file.write('<td>postgres</td>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        # httpd Summary
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(['httpd'],
            process_results, end)
        t_a_pids += a_pids
        t_r_pids += r_pids
        tt_rss += t_rss
        tt_pss += t_pss
        tt_uss += t_uss
        tt_vss += t_vss
        tt_swap += t_swap
        html_file.write('<tr>\n')
        html_file.write('<td>httpd</td>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        # collectd Summary
        a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
            ['collectd'], process_results, end)
        t_a_pids += a_pids
        t_r_pids += r_pids
        tt_rss += t_rss
        tt_pss += t_pss
        tt_uss += t_uss
        tt_vss += t_vss
        tt_swap += t_swap
        html_file.write('<tr>\n')
        html_file.write('<td>collectd</td>\n')
        html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
        html_file.write('<td>{}</td>\n'.format(a_pids))
        html_file.write('<td>{}</td>\n'.format(r_pids))
        html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>total</td>\n')
        html_file.write('<td>{}</td>\n'.format(t_a_pids + t_r_pids))
        html_file.write('<td>{}</td>\n'.format(t_a_pids))
        html_file.write('<td>{}</td>\n'.format(t_r_pids))
        html_file.write('<td>{}</td>\n'.format(round(tt_rss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(tt_pss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(tt_uss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(tt_vss, 2)))
        html_file.write('<td>{}</td>\n'.format(round(tt_swap, 2)))
        html_file.write('</tr>\n')
        html_file.write('</table>\n')
        # Appliance Graph
        html_file.write('</td></tr><tr><td>\n')
        file_name = '{}-appliance_memory.png'.format(version_string)
        html_file.write('<img src=\'graphs/{}\'>\n'.format(file_name))
        file_name = '{}-appliance_swap.png'.format(version_string)
        # Check for swap usage through out time frame:
        max_swap_used = 0
        for ts in appliance_results:
            swap_used = appliance_results[ts]['swap_total'] - appliance_results[ts]['swap_free']
            if swap_used > max_swap_used:
                max_swap_used = swap_used
        if max_swap_used < 10:  # Less than 10MiB Max, then hide graph
            html_file.write('<br><a href=\'graphs/{}\'>Swap Graph '.format(file_name))
            html_file.write('(Hidden, max_swap_used < 10 MiB)</a>\n')
        else:
            html_file.write('<img src=\'graphs/{}\'>\n'.format(file_name))
        html_file.write('</td></tr><tr><td>\n')
        # Per Process Results
        html_file.write('<table style="width:100%" border="1"><tr>\n')
        html_file.write('<td><b>Process Name</b></td>\n')
        html_file.write('<td><b>Process Pid</b></td>\n')
        html_file.write('<td><b>Start Time</b></td>\n')
        html_file.write('<td><b>End Time</b></td>\n')
        html_file.write('<td><b>Time Alive</b></td>\n')
        html_file.write('<td><b>RSS Mem Start</b></td>\n')
        html_file.write('<td><b>RSS Mem End</b></td>\n')
        html_file.write('<td><b>RSS Mem Change</b></td>\n')
        html_file.write('<td><b>PSS Mem Start</b></td>\n')
        html_file.write('<td><b>PSS Mem End</b></td>\n')
        html_file.write('<td><b>PSS Mem Change</b></td>\n')
        html_file.write('<td><b>CSV</b></td>\n')
        html_file.write('</tr>\n')
        # By Worker Type Memory Used
        # NOTE: this loop rebinds start/end/timediff to per-pid values; the
        # appliance-wide values above are no longer needed at this point.
        for ordered_name in process_order:
            if ordered_name in process_results:
                for pid in process_results[ordered_name]:
                    start = process_results[ordered_name][pid].keys()[0]
                    end = process_results[ordered_name][pid].keys()[-1]
                    timediff = end - start
                    html_file.write('<tr>\n')
                    if len(process_results[ordered_name]) > 1:
                        # Multiple pids for this name: link to the group anchor
                        # and to the per-pid graph image file.
                        html_file.write('<td><a href=\'#{}\'>{}</a></td>\n'.format(ordered_name,
                            ordered_name))
                        html_file.write('<td><a href=\'graphs/{}-{}.png\'>{}</a></td>\n'.format(
                            ordered_name, pid, pid))
                    else:
                        html_file.write('<td>{}</td>\n'.format(ordered_name))
                        html_file.write('<td><a href=\'#{}-{}.png\'>{}</a></td>\n'.format(
                            ordered_name, pid, pid))
                    html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0)))
                    html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0)))
                    html_file.write('<td>{}</td>\n'.format(unicode(timediff).partition('.')[0]))
                    rss_change = process_results[ordered_name][pid][end]['rss'] - \
                        process_results[ordered_name][pid][start]['rss']
                    html_file.write('<td>{}</td>\n'.format(
                        round(process_results[ordered_name][pid][start]['rss'], 2)))
                    html_file.write('<td>{}</td>\n'.format(
                        round(process_results[ordered_name][pid][end]['rss'], 2)))
                    html_file.write('<td>{}</td>\n'.format(round(rss_change, 2)))
                    pss_change = process_results[ordered_name][pid][end]['pss'] - \
                        process_results[ordered_name][pid][start]['pss']
                    html_file.write('<td>{}</td>\n'.format(
                        round(process_results[ordered_name][pid][start]['pss'], 2)))
                    html_file.write('<td>{}</td>\n'.format(
                        round(process_results[ordered_name][pid][end]['pss'], 2)))
                    html_file.write('<td>{}</td>\n'.format(round(pss_change, 2)))
                    html_file.write('<td><a href=\'rawdata/{}-{}.csv\'>csv</a></td>\n'.format(
                        pid, ordered_name))
                    html_file.write('</tr>\n')
            else:
                logger.debug('Process/Worker not part of test: {}'.format(ordered_name))
        html_file.write('</table>\n')
        # Worker Graphs
        for ordered_name in process_order:
            if ordered_name in process_results:
                html_file.write('<tr><td>\n')
                html_file.write('<div id=\'{}\'>Process name: {}</div><br>\n'.format(
                    ordered_name, ordered_name))
                if len(process_results[ordered_name]) > 1:
                    file_name = '{}-all.png'.format(ordered_name)
                    html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(file_name,
                        file_name))
                else:
                    for pid in sorted(process_results[ordered_name]):
                        file_name = '{}-{}.png'.format(ordered_name, pid)
                        html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(
                            file_name, file_name))
                html_file.write('</td></tr>\n')
        html_file.write('</table>\n')
        html_file.write('</body>\n')
        html_file.write('</html>\n')
    timediff = time.time() - starttime
    logger.info('Generated Summary html in: {}'.format(timediff))
def generate_workload_html(directory, ver, scenario_data, provider_names, grafana_urls):
    """Write ``workload.html`` describing one workload scenario run.

    Parameters
    ----------
    directory : py.path.local-like
        Output directory; only its ``join`` method is used.
    ver : str
        CFME version string (used in the page title and summary-CSV link).
    scenario_data : mapping
        Reads ``test_name``, ``appliance_roles``, ``appliance_ip``,
        ``appliance_name`` and ``scenario`` (with a ``name`` key).
    provider_names : str
        Display string for the provider(s) under test.
    grafana_urls : mapping or falsy
        Optional mapping of Grafana dashboard name -> URL.

    Uses module globals ``time``, ``logger``, ``test_ts`` and the sibling
    ``get_scenario_html`` helper.
    """
    starttime = time.time()
    file_name = str(directory.join('workload.html'))
    with open(file_name, 'w') as html_file:
        # --- Page header: title, roles, provider and appliance links ---
        html_file.write('<html>\n')
        html_file.write('<head><title>{} - {}</title></head>'.format(
            scenario_data['test_name'], provider_names))
        html_file.write('<body>\n')
        html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(ver,
            scenario_data['test_name'].title()))
        html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format(
            scenario_data['appliance_roles'].replace(',', ', ')))
        html_file.write('<b>Provider(s):</b> {}<br>\n'.format(provider_names))
        html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format(
            scenario_data['appliance_ip'], scenario_data['appliance_name']))
        if grafana_urls:
            # One link per Grafana dashboard, sorted by display name.
            for g_name in sorted(grafana_urls.keys()):
                html_file.write(
                    ' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name],
                        g_name))
        html_file.write('<br>\n')
        html_file.write('<b><a href=\'{}-summary.csv\'>Summary CSV</a></b>'.format(ver))
        html_file.write(' : <b><a href=\'index.html\'>Memory Info</a></b>')
        html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n')
        html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n')
        # --- Scenario configuration rendered as HTML ---
        html_file.write('<br><b>Scenario Data: </b><br>\n')
        yaml_html = get_scenario_html(scenario_data['scenario'])
        html_file.write(yaml_html + '\n')
        # Placeholder section; add_workload_quantifiers() later splices the
        # quantifier JSON in after the 'Quantifier Data:' marker below.
        html_file.write('<br>\n<br>\n<br>\n<b>Quantifier Data: </b>\n<br>\n<br>\n<br>\n<br>\n')
        # --- Version-info download links (system/process/gem/rpm CSVs) ---
        # NOTE(review): several rows write '<td> </td>' spacers -- possibly an
        # '&nbsp;' entity lost in transit; confirm against the rendered page.
        html_file.write('<table border="1">\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b><font size="4"> System Information</font></b></td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>\n')
        system_path = ('../version_info/system.csv')
        html_file.write('<a href="{}" download="System_Versions-{}-{}"> System Versions</a>'
            .format(system_path, test_ts, scenario_data['scenario']['name']))
        html_file.write('</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td> </td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td> </td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b><font size="4"> Process Information</font></b></td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>\n')
        process_path = ('../version_info/processes.csv')
        html_file.write('<a href="{}" download="Process_Versions-{}-{}"> Process Versions</a>'
            .format(process_path, test_ts, scenario_data['scenario']['name']))
        html_file.write('</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td> </td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td> </td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b><font size="4"> Ruby Gem Information</font></b></td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>\n')
        gems_path = ('../version_info/gems.csv')
        html_file.write('<a href="{}" download="Gem_Versions-{}-{}"> Ruby Gem Versions</a>'
            .format(gems_path, test_ts, scenario_data['scenario']['name']))
        html_file.write('</td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td> </td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td> </td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td><b><font size="4"> RPM Information</font></b></td>\n')
        html_file.write('</tr>\n')
        html_file.write('<tr>\n')
        html_file.write('<td>\n')
        rpms_path = ('../version_info/rpms.csv')
        html_file.write('<a href="{}" download="RPM_Versions-{}-{}"> RPM Versions</a>'
            .format(rpms_path, test_ts, scenario_data['scenario']['name']))
        html_file.write('</td>\n')
        html_file.write('</tr>\n')
        html_file.write('</table>\n')
        html_file.write('</body>\n')
        html_file.write('</html>\n')
    timediff = time.time() - starttime
    logger.info('Generated Workload html in: {}'.format(timediff))
def add_workload_quantifiers(quantifiers, scenario_data):
    """Splice the quantifier JSON into an already-written ``workload.html``.

    Locates the '<b>Quantifier Data: </b>' marker line emitted by
    ``generate_workload_html`` and inserts ``quantifiers`` (pretty-printed
    JSON, newlines converted to ``<br>``) immediately after it, preserving
    the rest of the file.

    Uses module globals ``time``, ``current_version``, ``results_path``,
    ``test_ts``, ``json`` and ``logger``.
    """
    starttime = time.time()
    ver = current_version()
    # Reconstruct the same output path generate_workload_html wrote to.
    workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver))
    directory = workload_path.join(scenario_data['scenario']['name'])
    file_name = str(directory.join('workload.html'))
    marker = '<b>Quantifier Data: </b>'
    yaml_dict = quantifiers
    yaml_string = str(json.dumps(yaml_dict, indent=4))
    yaml_html = yaml_string.replace('\n', '<br>\n')
    with open(file_name, 'r+') as html_file:
        # Scan forward line-by-line until the marker is found.
        # NOTE(review): if the marker is absent this loops past EOF forever
        # (readline() returns '' repeatedly) -- confirm the marker is always
        # present in files this is run against.
        line = ''
        while marker not in line:
            line = html_file.readline()
        # Remember the insertion point, slurp the tail, then rewrite the
        # tail after the inserted quantifier HTML (classic in-place insert).
        marker_pos = html_file.tell()
        remainder = html_file.read()
        html_file.seek(marker_pos)
        html_file.write('{} \n'.format(yaml_html))
        html_file.write(remainder)
    timediff = time.time() - starttime
    logger.info('Added quantifiers in: {}'.format(timediff))
def get_scenario_html(scenario_data):
    """Render the scenario configuration as a line-broken HTML fragment.

    The scenario AttrDict is converted to plain dicts, dumped as YAML, and
    the YAML text is then massaged into HTML with simple substitutions.
    """
    html = yaml.dump(create_dict(scenario_data))
    # Substitutions are order-sensitive: newlines first, then list/sequence
    # punctuation rewritten as bulleted line breaks.
    # NOTE(review): the space->space replacement below is a no-op as written;
    # it looks like an '&nbsp;' entity was lost at some point -- confirm.
    substitutions = (
        ('\n', '<br>\n'),
        (', ', '<br>\n - '),
        (' ', ' '),
        ('[', '<br>\n - '),
        (']', '\n'),
    )
    for old, new in substitutions:
        html = html.replace(old, new)
    return html
def create_dict(attr_dict):
    """Recursively convert an ``AttrDict`` into a plain ``dict``.

    Parameters
    ----------
    attr_dict : AttrDict or mapping
        Mapping to convert; nested ``AttrDict`` values are converted too.

    Returns
    -------
    dict
        A plain-dict copy with every nested ``AttrDict`` replaced.
    """
    main_dict = dict(attr_dict)
    # Iterating main_dict while only reassigning existing keys is safe
    # (the key set never changes). .items() works on both py2 and py3,
    # so the previous six.iteritems indirection is unnecessary.
    for key, value in main_dict.items():
        # isinstance rather than `type(value) == AttrDict`: the idiomatic
        # check, and it also handles AttrDict subclasses.
        if isinstance(value, AttrDict):
            main_dict[key] = create_dict(value)
    return main_dict
def graph_appliance_measurements(graphs_path, ver, appliance_results, use_slab, provider_names):
    """Plot appliance-wide memory and swap usage as stacked area charts.

    Writes ``{ver}-appliance_memory.png`` and ``{ver}-appliance_swap.png``
    into ``graphs_path``.

    Parameters
    ----------
    graphs_path : py.path.local-like
        Destination directory (its ``join`` method is used).
    ver : str
        Version string used in the output file names.
    appliance_results : mapping
        Keyed by timestamp; each value maps 'total', 'free', 'used',
        'buffers', 'cached', 'slab', 'swap_total' and 'swap_free' to MiB
        values.  NOTE(review): ``.keys()`` is consumed as the x-axis, so an
        insertion-ordered mapping (OrderedDict / sorted) is assumed.
    use_slab : bool
        If true, plot the slab band instead of the buffers band.
    provider_names : str
        Display string for the chart titles.

    Uses module globals ``time``, ``logger`` and ``cycler`` (imported at
    module level from matplotlib's cycler package -- not visible here).
    """
    import matplotlib as mpl
    mpl.use('Agg')  # headless backend; must be set before pyplot import
    import matplotlib.dates as mdates
    import matplotlib.pyplot as plt
    starttime = time.time()
    dates = appliance_results.keys()
    # Extract one time series per memory statistic.
    total_memory_list = list(appliance_results[ts]['total'] for ts in appliance_results.keys())
    free_memory_list = list(appliance_results[ts]['free'] for ts in appliance_results.keys())
    used_memory_list = list(appliance_results[ts]['used'] for ts in appliance_results.keys())
    buffers_memory_list = list(
        appliance_results[ts]['buffers'] for ts in appliance_results.keys())
    cache_memory_list = list(appliance_results[ts]['cached'] for ts in appliance_results.keys())
    slab_memory_list = list(appliance_results[ts]['slab'] for ts in appliance_results.keys())
    swap_total_list = list(appliance_results[ts]['swap_total'] for ts in
                           appliance_results.keys())
    swap_free_list = list(appliance_results[ts]['swap_free'] for ts in appliance_results.keys())
    # Stack Plot Memory Usage
    file_name = graphs_path.join('{}-appliance_memory.png'.format(ver))
    mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'coral', 'steelblue',
                                                       'forestgreen'])
    fig, ax = plt.subplots()
    plt.title('Provider(s): {}\nAppliance Memory'.format(provider_names))
    plt.xlabel('Date / Time')
    plt.ylabel('Memory (MiB)')
    # Band order bottom-to-top: used, slab-or-buffers, cached, free.
    if use_slab:
        y = [used_memory_list, slab_memory_list, cache_memory_list, free_memory_list]
    else:
        y = [used_memory_list, buffers_memory_list, cache_memory_list, free_memory_list]
    plt.stackplot(dates, *y, baseline='zero')
    # Annotate the first and last value of each cumulative band edge.
    ax.annotate(str(round(total_memory_list[0], 2)), xy=(dates[0], total_memory_list[0]),
        xytext=(4, 4), textcoords='offset points')
    ax.annotate(str(round(total_memory_list[-1], 2)), xy=(dates[-1], total_memory_list[-1]),
        xytext=(4, -4), textcoords='offset points')
    if use_slab:
        ax.annotate(str(round(slab_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
            slab_memory_list[0]), xytext=(4, 4), textcoords='offset points')
        ax.annotate(str(round(slab_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1] +
            slab_memory_list[-1]), xytext=(4, -4), textcoords='offset points')
        ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
            slab_memory_list[0] + cache_memory_list[0]), xytext=(4, 4),
            textcoords='offset points')
        ax.annotate(str(round(cache_memory_list[-1], 2)), xy=(
            dates[-1], used_memory_list[-1] + slab_memory_list[-1] + cache_memory_list[-1]),
            xytext=(4, -4), textcoords='offset points')
    else:
        ax.annotate(str(round(buffers_memory_list[0], 2)), xy=(
            dates[0], used_memory_list[0] + buffers_memory_list[0]), xytext=(4, 4),
            textcoords='offset points')
        ax.annotate(str(round(buffers_memory_list[-1], 2)), xy=(dates[-1],
            used_memory_list[-1] + buffers_memory_list[-1]), xytext=(4, -4),
            textcoords='offset points')
        ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
            buffers_memory_list[0] + cache_memory_list[0]), xytext=(4, 4),
            textcoords='offset points')
        ax.annotate(str(round(cache_memory_list[-1], 2)), xy=(
            dates[-1], used_memory_list[-1] + buffers_memory_list[-1] + cache_memory_list[-1]),
            xytext=(4, -4), textcoords='offset points')
    ax.annotate(str(round(used_memory_list[0], 2)), xy=(dates[0], used_memory_list[0]),
        xytext=(4, 4), textcoords='offset points')
    ax.annotate(str(round(used_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1]),
        xytext=(4, -4), textcoords='offset points')
    datefmt = mdates.DateFormatter('%m-%d %H-%M')
    ax.xaxis.set_major_formatter(datefmt)
    ax.grid(True)
    # Proxy rectangles for the legend (stackplot has no direct handles).
    p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick')
    p2 = plt.Rectangle((0, 0), 1, 1, fc='coral')
    p3 = plt.Rectangle((0, 0), 1, 1, fc='steelblue')
    p4 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen')
    if use_slab:
        ax.legend([p1, p2, p3, p4], ['Used', 'Slab', 'Cached', 'Free'],
            bbox_to_anchor=(1.45, 0.22), fancybox=True)
    else:
        ax.legend([p1, p2, p3, p4], ['Used', 'Buffers', 'Cached', 'Free'],
            bbox_to_anchor=(1.45, 0.22), fancybox=True)
    fig.autofmt_xdate()
    plt.savefig(str(file_name), bbox_inches='tight')
    plt.close()
    # Stack Plot Swap usage
    mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'forestgreen'])
    file_name = graphs_path.join('{}-appliance_swap.png'.format(ver))
    fig, ax = plt.subplots()
    plt.title('Provider(s): {}\nAppliance Swap'.format(provider_names))
    plt.xlabel('Date / Time')
    plt.ylabel('Swap (MiB)')
    # Used swap is derived: total - free, per timestamp.
    swap_used_list = [t - f for f, t in zip(swap_free_list, swap_total_list)]
    y = [swap_used_list, swap_free_list]
    plt.stackplot(dates, *y, baseline='zero')
    ax.annotate(str(round(swap_total_list[0], 2)), xy=(dates[0], swap_total_list[0]),
        xytext=(4, 4), textcoords='offset points')
    ax.annotate(str(round(swap_total_list[-1], 2)), xy=(dates[-1], swap_total_list[-1]),
        xytext=(4, -4), textcoords='offset points')
    ax.annotate(str(round(swap_used_list[0], 2)), xy=(dates[0], swap_used_list[0]),
        xytext=(4, 4), textcoords='offset points')
    ax.annotate(str(round(swap_used_list[-1], 2)), xy=(dates[-1], swap_used_list[-1]),
        xytext=(4, -4), textcoords='offset points')
    datefmt = mdates.DateFormatter('%m-%d %H-%M')
    ax.xaxis.set_major_formatter(datefmt)
    ax.grid(True)
    p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick')
    p2 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen')
    ax.legend([p1, p2], ['Used Swap', 'Free Swap'], bbox_to_anchor=(1.45, 0.22), fancybox=True)
    fig.autofmt_xdate()
    plt.savefig(str(file_name), bbox_inches='tight')
    plt.close()
    # Reset Colors
    mpl.rcdefaults()
    timediff = time.time() - starttime
    logger.info('Plotted Appliance Memory in: {}'.format(timediff))
def graph_all_miq_workers(graph_file_path, process_results, provider_names):
    """Plot RSS and VSS for every worker-like process on one chart.

    Only processes whose name contains 'Worker', 'Handler' or 'Catcher'
    are included.  Writes ``all-processes.png`` into ``graph_file_path``.

    Parameters
    ----------
    graph_file_path : py.path.local-like
        Destination directory (its ``join`` method is used).
    process_results : mapping
        ``{process_name: {pid: {timestamp: {'rss': ..., 'vss': ...}}}}``.
    provider_names : str
        Display string for the chart title.

    Uses module globals ``time`` and ``logger``.
    """
    import matplotlib as mpl
    mpl.use('Agg')  # headless backend; must be set before pyplot import
    import matplotlib.dates as mdates
    import matplotlib.pyplot as plt
    starttime = time.time()
    file_name = graph_file_path.join('all-processes.png')
    fig, ax = plt.subplots()
    plt.title('Provider(s): {}\nAll Workers/Monitored Processes'.format(provider_names))
    plt.xlabel('Date / Time')
    plt.ylabel('Memory (MiB)')
    for process_name in process_results:
        if 'Worker' in process_name or 'Handler' in process_name or 'Catcher' in process_name:
            for process_pid in process_results[process_name]:
                # One RSS line and one VSS line per pid, labelled with both
                # the pid and the process name.
                dates = process_results[process_name][process_pid].keys()
                rss_samples = list(process_results[process_name][process_pid][ts]['rss']
                    for ts in process_results[process_name][process_pid].keys())
                vss_samples = list(process_results[process_name][process_pid][ts]['vss']
                    for ts in process_results[process_name][process_pid].keys())
                plt.plot(dates, rss_samples, linewidth=1, label='{} {} RSS'.format(process_pid,
                    process_name))
                plt.plot(dates, vss_samples, linewidth=1, label='{} {} VSS'.format(
                    process_pid, process_name))
    datefmt = mdates.DateFormatter('%m-%d %H-%M')
    ax.xaxis.set_major_formatter(datefmt)
    ax.grid(True)
    plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
    fig.autofmt_xdate()
    plt.savefig(str(file_name), bbox_inches='tight')
    plt.close()
    timediff = time.time() - starttime
    logger.info('Plotted All Type/Process Memory in: {}'.format(timediff))
def graph_individual_process_measurements(graph_file_path, process_results, provider_names):
    """Plot RSS/PSS/USS/VSS/Swap for each monitored process, one PNG per pid.

    Writes ``{process_name}-{pid}.png`` into ``graph_file_path`` for every
    process/pid pair in ``process_results``.

    Parameters
    ----------
    graph_file_path : py.path.local-like
        Destination directory (its ``join`` method is used).
    process_results : mapping
        ``{process_name: {pid: {timestamp: {measurement: value}}}}`` with
        measurements 'rss', 'pss', 'uss', 'vss' and 'swap' in MiB.
    provider_names : str
        Display string for the chart titles.

    Uses module globals ``time`` and ``logger``.
    """
    import matplotlib as mpl
    mpl.use('Agg')  # headless backend; must be set before pyplot import
    import matplotlib.dates as mdates
    import matplotlib.pyplot as plt
    # The five measurement series, in plot/annotation order, with their
    # legend labels.  Previously each series had its own copy-pasted
    # extraction and annotation stanza; this table drives both instead.
    measurement_labels = (('rss', 'RSS'), ('pss', 'PSS'), ('uss', 'USS'),
                          ('vss', 'VSS'), ('swap', 'Swap'))

    def _annotate_ends(ax, dates, series):
        # Label the first and last sample of a series next to its endpoints.
        ax.annotate(str(round(series[0], 2)), xy=(dates[0], series[0]),
            xytext=(4, 4), textcoords='offset points')
        ax.annotate(str(round(series[-1], 2)), xy=(dates[-1], series[-1]),
            xytext=(4, -4), textcoords='offset points')

    starttime = time.time()
    for process_name in process_results:
        for process_pid in process_results[process_name]:
            file_name = graph_file_path.join('{}-{}.png'.format(process_name, process_pid))
            measurements = process_results[process_name][process_pid]
            dates = measurements.keys()
            samples = dict((key, [measurements[ts][key] for ts in measurements.keys()])
                           for key, _ in measurement_labels)
            fig, ax = plt.subplots()
            plt.title('Provider(s)/Size: {}\nProcess/Worker: {}\nPID: {}'.format(provider_names,
                process_name, process_pid))
            plt.xlabel('Date / Time')
            plt.ylabel('Memory (MiB)')
            for key, label in measurement_labels:
                plt.plot(dates, samples[key], linewidth=1, label=label)
            for key, _ in measurement_labels:
                if samples[key]:
                    _annotate_ends(ax, dates, samples[key])
            datefmt = mdates.DateFormatter('%m-%d %H-%M')
            ax.xaxis.set_major_formatter(datefmt)
            ax.grid(True)
            plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
            fig.autofmt_xdate()
            plt.savefig(str(file_name), bbox_inches='tight')
            plt.close()
    timediff = time.time() - starttime
    logger.info('Plotted Individual Process Memory in: {}'.format(timediff))
def graph_same_miq_workers(graph_file_path, process_results, provider_names):
    """Plot all pids of each multi-pid process type together on one chart.

    Process types with only a single pid are skipped (they are covered by
    ``graph_individual_process_measurements``).  Writes
    ``{process_name}-all.png`` into ``graph_file_path``.

    Parameters
    ----------
    graph_file_path : py.path.local-like
        Destination directory (its ``join`` method is used).
    process_results : mapping
        ``{process_name: {pid: {timestamp: {measurement: value}}}}``.
    provider_names : str
        Display string for the chart titles.

    Uses module globals ``time`` and ``logger``.
    """
    import matplotlib as mpl
    mpl.use('Agg')  # headless backend; must be set before pyplot import
    import matplotlib.dates as mdates
    import matplotlib.pyplot as plt
    starttime = time.time()
    for process_name in process_results:
        if len(process_results[process_name]) > 1:
            logger.debug('Plotting {} {} processes on single graph.'.format(
                len(process_results[process_name]), process_name))
            file_name = graph_file_path.join('{}-all.png'.format(process_name))
            fig, ax = plt.subplots()
            # Build a 'PIDs: a, b, c...' line for the title, wrapping to a
            # new line after every sixth pid; the final slice strips the
            # trailing comma+separator.
            pids = 'PIDs: '
            for i, pid in enumerate(process_results[process_name], 1):
                pids = '{}{}'.format(pids, '{},{}'.format(pid, [' ', '\n'][i % 6 == 0]))
            pids = pids[0:-2]
            plt.title('Provider: {}\nProcess/Worker: {}\n{}'.format(provider_names,
                process_name, pids))
            plt.xlabel('Date / Time')
            plt.ylabel('Memory (MiB)')
            for process_pid in process_results[process_name]:
                # Five series (RSS/PSS/USS/VSS/Swap) per pid, each labelled
                # with the pid, plus first/last-point annotations.
                dates = process_results[process_name][process_pid].keys()
                rss_samples = list(process_results[process_name][process_pid][ts]['rss']
                    for ts in process_results[process_name][process_pid].keys())
                pss_samples = list(process_results[process_name][process_pid][ts]['pss']
                    for ts in process_results[process_name][process_pid].keys())
                uss_samples = list(process_results[process_name][process_pid][ts]['uss']
                    for ts in process_results[process_name][process_pid].keys())
                vss_samples = list(process_results[process_name][process_pid][ts]['vss']
                    for ts in process_results[process_name][process_pid].keys())
                swap_samples = list(process_results[process_name][process_pid][ts]['swap']
                    for ts in process_results[process_name][process_pid].keys())
                plt.plot(dates, rss_samples, linewidth=1, label='{} RSS'.format(process_pid))
                plt.plot(dates, pss_samples, linewidth=1, label='{} PSS'.format(process_pid))
                plt.plot(dates, uss_samples, linewidth=1, label='{} USS'.format(process_pid))
                plt.plot(dates, vss_samples, linewidth=1, label='{} VSS'.format(process_pid))
                plt.plot(dates, swap_samples, linewidth=1, label='{} SWAP'.format(process_pid))
                if rss_samples:
                    ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]),
                        xytext=(4, 4), textcoords='offset points')
                    ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1],
                        rss_samples[-1]), xytext=(4, -4), textcoords='offset points')
                if pss_samples:
                    ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0],
                        pss_samples[0]), xytext=(4, 4), textcoords='offset points')
                    ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1],
                        pss_samples[-1]), xytext=(4, -4), textcoords='offset points')
                if uss_samples:
                    ax.annotate(str(round(uss_samples[0], 2)), xy=(dates[0],
                        uss_samples[0]), xytext=(4, 4), textcoords='offset points')
                    ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1],
                        uss_samples[-1]), xytext=(4, -4), textcoords='offset points')
                if vss_samples:
                    ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0],
                        vss_samples[0]), xytext=(4, 4), textcoords='offset points')
                    ax.annotate(str(round(vss_samples[-1], 2)), xy=(dates[-1],
                        vss_samples[-1]), xytext=(4, -4), textcoords='offset points')
                if swap_samples:
                    ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0],
                        swap_samples[0]), xytext=(4, 4), textcoords='offset points')
                    ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1],
                        swap_samples[-1]), xytext=(4, -4), textcoords='offset points')
            datefmt = mdates.DateFormatter('%m-%d %H-%M')
            ax.xaxis.set_major_formatter(datefmt)
            ax.grid(True)
            plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
            fig.autofmt_xdate()
            plt.savefig(str(file_name), bbox_inches='tight')
            plt.close()
    timediff = time.time() - starttime
    logger.info('Plotted Same Type/Process Memory in: {}'.format(timediff))
def summary_csv_measurement_dump(csv_file, process_results, measurement):
    """Append a per-process start/end table for one memory measurement.

    Writes a banner and one CSV row per process/pid, containing the value
    of ``measurement`` (e.g. 'rss') at the first and last recorded
    timestamp.  Iterates the module-level ``process_order`` list so the
    output order is stable.
    """
    banner = '---------------------------------------------\n'
    csv_file.write(banner)
    csv_file.write('Per Process {} Memory Usage\n'.format(measurement.upper()))
    csv_file.write(banner)
    csv_file.write('Process/Worker Type,PID,Start of test,End of test\n')
    for name in process_order:
        if name not in process_results:
            continue
        for pid in sorted(process_results[name]):
            # Timestamps are assumed insertion-ordered (py2 .keys() list);
            # first/last bracket the test run.
            timestamps = process_results[name][pid].keys()
            first = timestamps[0]
            last = timestamps[-1]
            csv_file.write('{},{},{},{}\n'.format(
                name, pid,
                round(process_results[name][pid][first][measurement], 2),
                round(process_results[name][pid][last][measurement], 2)))
| gpl-2.0 |
astocko/statsmodels | statsmodels/sandbox/survival2.py | 35 | 17924 | #Kaplan-Meier Estimator
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
from scipy import stats
from statsmodels.iolib.table import SimpleTable
class KaplanMeier(object):
    """
    KaplanMeier(...)
    KaplanMeier(data, endog, exog=None, censoring=None)

    Create an object of class KaplanMeier for estimating
    Kaplan-Meier survival curves.

    Parameters
    ----------
    data: array_like
        An array, with observations in each row, and
        variables in the columns
    endog: index (starting at zero) of the column
        containing the endogenous variable (time)
    exog: index of the column containing the exogenous
        variable (must be catagorical). If exog = None, this
        is equivalent to a single survival curve
    censoring: index of the column containing an indicator
        of whether an observation is an event, or a censored
        observation, with 0 for censored, and 1 for an event

    Attributes
    ----------
    censorings: List of censorings associated with each unique
        time, at each value of exog
    events: List of the number of events at each unique time
        for each value of exog
    results: List of arrays containing estimates of the value
        value of the survival function and its standard error
        at each unique time, for each value of exog
    ts: List of unique times for each value of exog

    Methods
    -------
    fit: Calcuate the Kaplan-Meier estimates of the survival
        function and its standard error at each time, for each
        value of exog
    plot: Plot the survival curves using matplotlib.plyplot
    summary: Display the results of fit in a table. Gives results
        for all (including censored) times
    test_diff: Test for difference between survival curves

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> from statsmodels.sandbox.survival2 import KaplanMeier
    >>> dta = sm.datasets.strikes.load()
    >>> dta = dta.values()[-1]
    >>> dta[range(5),:]
    array([[  7.00000000e+00,   1.13800000e-02],
           [  9.00000000e+00,   1.13800000e-02],
           [  1.30000000e+01,   1.13800000e-02],
           [  1.40000000e+01,   1.13800000e-02],
           [  2.60000000e+01,   1.13800000e-02]])
    >>> km = KaplanMeier(dta,0)
    >>> km.fit()
    >>> km.plot()

    Doing

    >>> km.summary()

    will display a table of the estimated survival and standard errors
    for each time. The first few lines are

              Kaplan-Meier Curve
    =====================================
     Time     Survival        Std. Err
    -------------------------------------
     1.0   0.983870967742 0.0159984306572
     2.0   0.91935483871  0.0345807888235
     3.0   0.854838709677 0.0447374942184
     4.0   0.838709677419 0.0467104592871
     5.0   0.822580645161 0.0485169952543

    Doing

    >>> plt.show()

    will plot the survival curve

    Mutliple survival curves:

    >>> km2 = KaplanMeier(dta,0,exog=1)
    >>> km2.fit()

    km2 will estimate a survival curve for each value of industrial
    production, the column of dta with index one (1).

    With censoring:

    >>> censoring = np.ones_like(dta[:,0])
    >>> censoring[dta[:,0] > 80] = 0
    >>> dta = np.c_[dta,censoring]
    >>> dta[range(5),:]
    array([[  7.00000000e+00,   1.13800000e-02,   1.00000000e+00],
           [  9.00000000e+00,   1.13800000e-02,   1.00000000e+00],
           [  1.30000000e+01,   1.13800000e-02,   1.00000000e+00],
           [  1.40000000e+01,   1.13800000e-02,   1.00000000e+00],
           [  2.60000000e+01,   1.13800000e-02,   1.00000000e+00]])
    >>> km3 = KaplanMeier(dta,0,exog=1,censoring=2)
    >>> km3.fit()

    Test for difference of survival curves

    >>> log_rank = km3.test_diff([0.0645,-0.03957])

    The zeroth element of log_rank is the chi-square test statistic
    for the difference between the survival curves for exog = 0.0645
    and exog = -0.03957, the index one element is the degrees of freedom for
    the test, and the index two element is the p-value for the test

    Groups with nan names

    >>> groups = np.ones_like(dta[:,1])
    >>> groups = groups.astype('S4')
    >>> groups[dta[:,1] > 0] = 'high'
    >>> groups[dta[:,1] <= 0] = 'low'
    >>> dta = dta.astype('S4')
    >>> dta[:,1] = groups
    >>> dta[range(5),:]
    array([['7.0', 'high', '1.0'],
           ['9.0', 'high', '1.0'],
           ['13.0', 'high', '1.0'],
           ['14.0', 'high', '1.0'],
           ['26.0', 'high', '1.0']],
          dtype='|S4')
    >>> km4 = KaplanMeier(dta,0,exog=1,censoring=2)
    >>> km4.fit()
    """

    def __init__(self, data, endog, exog=None, censoring=None):
        # Record which optional columns were requested; after the column
        # slice below, endog/exog/censoring are re-pointed at positions
        # 0/1/2 of the reduced array rather than the caller's indices.
        self.exog = exog
        self.censoring = censoring
        cols = [endog]
        self.endog = 0
        if exog != None:
            cols.append(exog)
            self.exog = 1
        if censoring != None:
            cols.append(censoring)
            if exog != None:
                self.censoring = 2
            else:
                self.censoring = 1
        data = data[:,cols]
        if data.dtype == float or data.dtype == int:
            # Numeric data: drop any row containing a NaN.
            self.data = data[~np.isnan(data).any(1)]
        else:
            # Non-numeric dtype (e.g. string group labels): filter the
            # time and censoring columns for NaN individually, keeping the
            # exog column aligned, then rebuild the array column-wise.
            t = (data[:,self.endog]).astype(float)
            if exog != None:
                evec = data[:,self.exog]
                evec = evec[~np.isnan(t)]
            if censoring != None:
                cvec = (data[:,self.censoring]).astype(float)
                cvec = cvec[~np.isnan(t)]
            t = t[~np.isnan(t)]
            if censoring != None:
                # Order matters: t and evec are masked by cvec's NaNs
                # before cvec itself is filtered.
                t = t[~np.isnan(cvec)]
                if exog != None:
                    evec = evec[~np.isnan(cvec)]
                cvec = cvec[~np.isnan(cvec)]
            cols = [t]
            if exog != None:
                cols.append(evec)
            if censoring != None:
                cols.append(cvec)
            data = (np.array(cols)).transpose()
            self.data = data

    def fit(self):
        """
        Calculate the Kaplan-Meier estimator of the survival function
        """
        self.results = []
        self.ts = []
        self.censorings = []
        self.event = []
        if self.exog == None:
            # Single pooled curve.
            self.fitting_proc(self.data)
        else:
            # One curve per unique exog value.
            groups = np.unique(self.data[:,self.exog])
            self.groups = groups
            for g in groups:
                group = self.data[self.data[:,self.exog] == g]
                self.fitting_proc(group)

    def plot(self):
        """
        Plot the estimated survival curves. After using this method
        do

        plt.show()

        to display the plot
        """
        plt.figure()
        if self.exog == None:
            self.plotting_proc(0)
        else:
            for g in range(len(self.groups)):
                self.plotting_proc(g)
        plt.ylim(ymax=1.05)
        plt.ylabel('Survival')
        plt.xlabel('Time')

    def summary(self):
        """
        Print a set of tables containing the estimates of the survival
        function, and its standard errors
        """
        if self.exog == None:
            self.summary_proc(0)
        else:
            for g in range(len(self.groups)):
                self.summary_proc(g)

    def fitting_proc(self, group):
        """
        For internal use.

        Compute the Kaplan-Meier estimate and Greenwood standard errors
        for a single group, appending to self.results/self.ts/self.event
        (and self.censorings when censoring is present).
        """
        t = ((group[:,self.endog]).astype(float)).astype(int)
        if self.censoring == None:
            # events[i] = number of events at time t[i]; n = risk set size.
            # NOTE(review): np.bincount returns a 1-D array, so the
            # `[:,list(t)]` index looks wrong under modern numpy (too many
            # indices) -- confirm intended numpy version / behavior.
            events = np.bincount(t)
            t = np.unique(t)
            events = events[:,list(t)]
            events = events.astype(float)
            eventsSum = np.cumsum(events)
            eventsSum = np.r_[0,eventsSum]
            n = len(group) - eventsSum[:-1]
        else:
            # With censoring: events weighted by the indicator, censored
            # counts by its complement; both reduce the risk set.
            censoring = ((group[:,self.censoring]).astype(float)).astype(int)
            reverseCensoring = -1*(censoring - 1)
            events = np.bincount(t,censoring)
            censored = np.bincount(t,reverseCensoring)
            t = np.unique(t)
            censored = censored[:,list(t)]
            censored = censored.astype(float)
            censoredSum = np.cumsum(censored)
            censoredSum = np.r_[0,censoredSum]
            events = events[:,list(t)]
            events = events.astype(float)
            eventsSum = np.cumsum(events)
            eventsSum = np.r_[0,eventsSum]
            n = len(group) - eventsSum[:-1] - censoredSum[:-1]
            (self.censorings).append(censored)
        # Product-limit estimate and Greenwood's variance formula.
        survival = np.cumprod(1-events/n)
        var = ((survival*survival) *
               np.cumsum(events/(n*(n-events))))
        se = np.sqrt(var)
        (self.results).append(np.array([survival,se]))
        (self.ts).append(t)
        (self.event).append(events)

    def plotting_proc(self, g):
        """
        For internal use.

        Draw the step-function survival curve for group index g, with
        tick marks at censored times.
        """
        survival = self.results[g][0]
        t = self.ts[g]
        e = (self.event)[g]
        if self.censoring != None:
            # Short vertical ticks mark censored observations.
            c = self.censorings[g]
            csurvival = survival[c != 0]
            ct = t[c != 0]
            if len(ct) != 0:
                plt.vlines(ct,csurvival+0.02,csurvival-0.02)
        # Duplicate points to draw the estimate as a right-continuous
        # step function starting at S(0) = 1.
        x = np.repeat(t[e != 0], 2)
        y = np.repeat(survival[e != 0], 2)
        if self.ts[g][-1] in t[e != 0]:
            x = np.r_[0,x]
            y = np.r_[1,1,y[:-1]]
        else:
            x = np.r_[0,x,self.ts[g][-1]]
            y = np.r_[1,1,y]
        plt.plot(x,y)

    def summary_proc(self, g):
        """
        For internal use.

        Print a SimpleTable of time / survival / std. err for group g.
        """
        if self.exog != None:
            myTitle = ('exog = ' + str(self.groups[g]) + '\n')
        else:
            myTitle = "Kaplan-Meier Curve"
        table = np.transpose(self.results[g])
        table = np.c_[np.transpose(self.ts[g]),table]
        table = SimpleTable(table, headers=['Time','Survival','Std. Err'],
                            title = myTitle)
        print(table)

    def test_diff(self, groups, rho=None, weight=None):
        """
        test_diff(groups, rho=0)

        Test for difference between survival curves

        Parameters
        ----------
        groups: A list of the values for exog to test for difference.
            tests the null hypothesis that the survival curves for all
            values of exog in groups are equal
        rho: compute the test statistic with weight S(t)^rho, where
            S(t) is the pooled estimate for the Kaplan-Meier survival function.
            If rho = 0, this is the logrank test, if rho = 0, this is the
            Peto and Peto modification to the Gehan-Wilcoxon test.
        weight: User specified function that accepts as its sole arguement
            an array of times, and returns an array of weights for each time
            to be used in the test

        Returns
        -------
        An array whose zeroth element is the chi-square test statistic for
        the global null hypothesis, that all survival curves are equal,
        the index one element is degrees of freedom for the test, and the
        index two element is the p-value for the test.

        Examples
        --------
        >>> import statsmodels.api as sm
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>> from statsmodels.sandbox.survival2 import KaplanMeier
        >>> dta = sm.datasets.strikes.load()
        >>> dta = dta.values()[-1]
        >>> censoring = np.ones_like(dta[:,0])
        >>> censoring[dta[:,0] > 80] = 0
        >>> dta = np.c_[dta,censoring]
        >>> km = KaplanMeier(dta,0,exog=1,censoring=2)
        >>> km.fit()

        Test for difference of survival curves

        >>> log_rank = km3.test_diff([0.0645,-0.03957])

        The zeroth element of log_rank is the chi-square test statistic
        for the difference between the survival curves using the log rank test
        for exog = 0.0645 and exog = -0.03957, the index one element
        is the degrees of freedom for the test, and the index two element
        is the p-value for the test

        >>> wilcoxon = km.test_diff([0.0645,-0.03957], rho=1)

        wilcoxon is the equivalent information as log_rank, but for the
        Peto and Peto modification to the Gehan-Wilcoxon test.

        User specified weight functions

        >>> log_rank = km3.test_diff([0.0645,-0.03957], weight=np.ones_like)

        This is equivalent to the log rank test

        More than two groups

        >>> log_rank = km.test_diff([0.0645,-0.03957,0.01138])

        The test can be performed with arbitrarily many groups, so long as
        they are all in the column exog
        """
        groups = np.asarray(groups)
        if self.exog == None:
            raise ValueError("Need an exogenous variable for logrank test")

        elif (np.in1d(groups,self.groups)).all():
            # Restrict to the requested groups only.
            data = self.data[np.in1d(self.data[:,self.exog],groups)]
            t = ((data[:,self.endog]).astype(float)).astype(int)
            tind = np.unique(t)
            NK = []
            N = []
            D = []
            Z = []
            # Choose the per-time weights s: S(t)^rho (pooled KM), a user
            # function of the times, or all ones (plain log-rank).
            if rho != None and weight != None:
                raise ValueError("Must use either rho or weights, not both")
            elif rho != None:
                s = KaplanMeier(data,self.endog,censoring=self.censoring)
                s.fit()
                s = (s.results[0][0]) ** (rho)
                s = np.r_[1,s[:-1]]
            elif weight != None:
                s = weight(tind)
            else:
                s = np.ones_like(tind)
            if self.censoring == None:
                for g in groups:
                    # Per-group (dk, nk) and pooled (d, n) event counts and
                    # risk-set sizes at each unique time.
                    dk = np.bincount((t[data[:,self.exog] == g]))
                    d = np.bincount(t)
                    if np.max(tind) != len(dk):
                        # Pad so the group's bincount covers all times.
                        dif = np.max(tind) - len(dk) + 1
                        dk = np.r_[dk,[0]*dif]
                    dk = dk[:,list(tind)]
                    d = d[:,list(tind)]
                    dk = dk.astype(float)
                    d = d.astype(float)
                    dkSum = np.cumsum(dk)
                    dSum = np.cumsum(d)
                    dkSum = np.r_[0,dkSum]
                    dSum = np.r_[0,dSum]
                    nk = len(data[data[:,self.exog] == g]) - dkSum[:-1]
                    n = len(data) - dSum[:-1]
                    # Drop times where the pooled risk set is <= 1 (the
                    # variance term below divides by n - 1).
                    d = d[n>1]
                    dk = dk[n>1]
                    nk = nk[n>1]
                    n = n[n>1]
                    s = s[n>1]
                    # Expected events under H0 and weighted observed-minus-
                    # expected contribution for this group.
                    ek = (nk * d)/(n)
                    Z.append(np.sum(s * (dk - ek)))
                    NK.append(nk)
                    N.append(n)
                    D.append(d)
            else:
                for g in groups:
                    # Same as above, but risk sets also shrink by the
                    # cumulative censored counts.
                    censoring = ((data[:,self.censoring]).astype(float)).astype(int)
                    reverseCensoring = -1*(censoring - 1)
                    censored = np.bincount(t,reverseCensoring)
                    ck = np.bincount((t[data[:,self.exog] == g]),
                                     reverseCensoring[data[:,self.exog] == g])
                    dk = np.bincount((t[data[:,self.exog] == g]),
                                     censoring[data[:,self.exog] == g])
                    d = np.bincount(t,censoring)
                    if np.max(tind) != len(dk):
                        dif = np.max(tind) - len(dk) + 1
                        dk = np.r_[dk,[0]*dif]
                        ck = np.r_[ck,[0]*dif]
                    dk = dk[:,list(tind)]
                    ck = ck[:,list(tind)]
                    d = d[:,list(tind)]
                    dk = dk.astype(float)
                    d = d.astype(float)
                    ck = ck.astype(float)
                    dkSum = np.cumsum(dk)
                    dSum = np.cumsum(d)
                    ck = np.cumsum(ck)
                    ck = np.r_[0,ck]
                    dkSum = np.r_[0,dkSum]
                    dSum = np.r_[0,dSum]
                    censored = censored[:,list(tind)]
                    censored = censored.astype(float)
                    censoredSum = np.cumsum(censored)
                    censoredSum = np.r_[0,censoredSum]
                    nk = (len(data[data[:,self.exog] == g]) - dkSum[:-1]
                          - ck[:-1])
                    n = len(data) - dSum[:-1] - censoredSum[:-1]
                    d = d[n>1]
                    dk = dk[n>1]
                    nk = nk[n>1]
                    n = n[n>1]
                    s = s[n>1]
                    ek = (nk * d)/(n)
                    Z.append(np.sum(s * (dk - ek)))
                    NK.append(nk)
                    N.append(n)
                    D.append(d)
            # Assemble the covariance matrix of the weighted O-E vector:
            # off-diagonal entries from the hypergeometric covariance, then
            # the diagonal overwritten with the variance terms.
            Z = np.array(Z)
            N = np.array(N)
            D = np.array(D)
            NK = np.array(NK)
            sigma = -1 * np.dot((NK/N) * ((N - D)/(N - 1)) * D
                                * np.array([(s ** 2)]*len(D))
                                ,np.transpose(NK/N))
            np.fill_diagonal(sigma, np.diagonal(np.dot((NK/N)
                             * ((N - D)/(N - 1)) * D
                             * np.array([(s ** 2)]*len(D))
                             ,np.transpose(1 - (NK/N)))))
            # Chi-square statistic Z' Sigma^+ Z with a pseudo-inverse
            # (sigma is singular: the rows of Z sum to zero).
            chisq = np.dot(np.transpose(Z),np.dot(la.pinv(sigma), Z))
            df = len(groups) - 1
            return np.array([chisq, df, stats.chi2.sf(chisq,df)])

        else:
            raise ValueError("groups must be in column exog")
| bsd-3-clause |
ccauet/scikit-optimize | skopt/space/transformers.py | 1 | 4631 | import numpy as np
from sklearn.preprocessing import LabelBinarizer
# Base class for all 1-D transformers.
class Transformer(object):
    """Abstract base for one-dimensional search-space transformers."""

    def fit(self, X):
        """Fitting is a no-op by default; returns self so calls can chain."""
        return self

    def transform(self, X):
        """Map values into the transformed space. Subclasses must override."""
        raise NotImplementedError

    def inverse_transform(self, X):
        """Map transformed values back. Subclasses must override."""
        raise NotImplementedError
class Identity(Transformer):
    """Identity transform: values pass through unchanged in both directions."""

    def transform(self, X):
        return X

    def inverse_transform(self, Xt):
        return Xt
class Log10(Transformer):
    """Base 10 logarithm transform."""

    def transform(self, X):
        # np.float was a deprecated alias for the builtin float and was
        # removed in NumPy 1.24; use float directly so this keeps working
        # on modern NumPy.
        return np.log10(np.asarray(X, dtype=float))

    def inverse_transform(self, Xt):
        return 10.0 ** np.asarray(Xt, dtype=float)
class CategoricalEncoder(Transformer):
    """OneHotEncoder that can handle categorical variables."""

    def __init__(self):
        """Convert labeled categories into one-hot encoded features."""
        self._lb = LabelBinarizer()

    def fit(self, X):
        """Fit a list or array of categories.

        Parameters
        ----------
        * `X` [array-like, shape=(n_categories,)]:
            List of categories.
        """
        # Assign each category an integer label in order of appearance,
        # then fit the binarizer on those integer labels.
        self.mapping_ = {}
        for index, category in enumerate(X):
            self.mapping_[category] = index
        self.inverse_mapping_ = {
            index: category for category, index in self.mapping_.items()}
        self._lb.fit([self.mapping_[category] for category in X])
        self.n_classes = len(self._lb.classes_)
        return self

    def transform(self, X):
        """Transform an array of categories to a one-hot encoded representation.

        Parameters
        ----------
        * `X` [array-like, shape=(n_samples,)]:
            List of categories.

        Returns
        -------
        * `Xt` [array-like, shape=(n_samples, n_categories)]:
            The one-hot encoded categories.
        """
        encoded = [self.mapping_[category] for category in X]
        return self._lb.transform(encoded)

    def inverse_transform(self, Xt):
        """Inverse transform one-hot encoded categories back to their original
        representation.

        Parameters
        ----------
        * `Xt` [array-like, shape=(n_samples, n_categories)]:
            One-hot encoded categories.

        Returns
        -------
        * `X` [array-like, shape=(n_samples,)]:
            The original categories.
        """
        Xt = np.asarray(Xt)
        labels = self._lb.inverse_transform(Xt)
        return [self.inverse_mapping_[label] for label in labels]
class Normalize(Transformer):
    """
    Scales each dimension into the interval [0, 1].

    Parameters
    ----------
    * `low` [float]:
        Lower bound.

    * `high` [float]:
        Higher bound.

    * `is_int` [bool, default=True]
        Round and cast the return value of `inverse_transform` to integer. Set
        to `True` when applying this transform to integers.
    """
    def __init__(self, low, high, is_int=False):
        self.low = low
        self.high = high
        self.is_int = is_int

    def transform(self, X):
        # Validate the raw values are within [low, high] before scaling.
        X = np.asarray(X)
        if np.any(X > self.high):
            raise ValueError("All values should be less than %f" % self.high)
        if np.any(X < self.low):
            raise ValueError("All values should be greater than %f" % self.low)
        return (X - self.low) / (self.high - self.low)

    def inverse_transform(self, X):
        # Validate the scaled values are within [0, 1] before un-scaling.
        X = np.asarray(X)
        if np.any(X > 1.0):
            raise ValueError("All values should be less than 1.0")
        if np.any(X < 0.0):
            raise ValueError("All values should be greater than 0.0")
        X_orig = X * (self.high - self.low) + self.low
        if self.is_int:
            # np.int was a deprecated alias for the builtin int and was
            # removed in NumPy 1.24; use int directly.
            return np.round(X_orig).astype(int)
        return X_orig
class Pipeline(Transformer):
    """
    A lightweight pipeline to chain transformers.

    Parameters
    ----------
    * 'transformers' [list]:
        A list of Transformer instances.
    """
    def __init__(self, transformers):
        self.transformers = list(transformers)
        # Reject anything that is not a Transformer up front.
        for transformer in self.transformers:
            if not isinstance(transformer, Transformer):
                raise ValueError(
                    "Provided transformers should be a Transformer "
                    "instance. Got %s" % transformer
                )

    def fit(self, X):
        # Fit every step on the same raw input, then allow chaining.
        for step in self.transformers:
            step.fit(X)
        return self

    def transform(self, X):
        # Apply the steps in order, feeding each output into the next.
        result = X
        for step in self.transformers:
            result = step.transform(result)
        return result

    def inverse_transform(self, X):
        # Undo the steps in reverse order.
        result = X
        for step in reversed(self.transformers):
            result = step.inverse_transform(result)
        return result
| bsd-3-clause |
natasasdj/OpenWPM | analysis/15_images_zero.py | 1 | 9003 | import os
import sqlite3
import pandas as pd
import numpy as np
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from statsmodels.distributions.empirical_distribution import ECDF
def thousands(x, pos):
    """Axis tick formatter: render large counts with a K/M/B suffix.

    Values below 1000 are returned unchanged; larger values are truncated
    to an integer multiple of the matching power of ten.
    """
    for threshold, factor, suffix in ((1e9, 1e-9, 'B'),
                                      (1e6, 1e-6, 'M'),
                                      (1e3, 1e-3, 'K')):
        if x >= threshold:
            return '%d%s' % (x * factor, suffix)
    return x
# Matplotlib tick formatter built from thousands(); applied to axes below.
formatter = FuncFormatter(thousands)
def ecdf_for_plot(sample):
    # Returns (x, y) for a step plot of the empirical CDF of `sample`.
    # `sample` is a pandas Series (sort_values is used); x is sorted
    # descending and y = ECDF(x) evaluated at those points.
    # NOTE(review): Python 2 print statements — this file is Python 2 only.
    #x = np.linspace(min(sample), max(sample))
    print "sample: ",type(sample)
    x = sample.sort_values(ascending = False)
    ecdf = ECDF(x)
    # print ecdf
    print "ecdf: ",type(ecdf)
    y = ecdf(x)
    #print y
    print "y: ", type(y)
    return (x,y)
# ---- Load image metadata from SQLite and select zero-size images. ----
# NOTE(review): DataFrame.ix was removed in modern pandas — use .loc;
# confirm the pinned pandas version before running.
res_dir = '/home/nsarafij/project/OpenWPM/analysis/results/'
db = res_dir + 'images.sqlite'
conn = sqlite3.connect(db)
query = 'SELECT * FROM Images'
df = pd.read_sql_query(query,conn)
zeroes = df.ix[df['size']==0]
zeroes.shape[0]#2693959
df.shape[0]#31861758
zeroes.shape[0]/float(df.shape[0])#0.0845
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
### content-length
# CDF of the Content-Length header for zero-size images (linear and
# symlog x scales).
(x,y) = ecdf_for_plot(zeroes['cont_length'])
fig, ax = plt.subplots()
plt.step(x,y)
plt.ylabel('cdf')
plt.xlabel('content-length')
plt.grid(True)
ax.xaxis.set_major_formatter(formatter)
plt.savefig(os.path.join(fig_dir,'zeroes_cont_length_cdf.png'))
plt.show()
plt.figure()
plt.step(x,y)
plt.ylabel('cdf')
plt.xlabel('content-length')
plt.grid(True)
plt.xscale('symlog')
plt.savefig(os.path.join(fig_dir,'zeroes_cont_length_cdf_log.png'))
plt.show()
# Fraction of zero-size images whose content-length matches the size.
(zeroes['cont_length']==zeroes['size']).sum()/float(zeroes.shape[0]) #0.15947532980271786
### domains all
# first-party domains (with subdomain)
(zeroes['site_id']==zeroes['resp_domain']).sum()/float(zeroes.shape[0])*100
#23.3%
# third-party domains (with subdomain)
(zeroes['site_id']!=zeroes['resp_domain']).sum()/float(zeroes.shape[0])*100
#76.6%
# ---- Join lookup tables so each row carries two-part (registrable)
# domains for both the site and the responding server, plus the company
# that owns the responding domain. Each merge drops the join key and
# renames the joined column. ----
zeroes.columns = ['respDom_id' if x=='resp_domain' else x for x in zeroes.columns]
query = 'SELECT * FROM Domain_DomainTwoPart'
df_domdom2 = pd.read_sql_query(query,conn)
zeroes=zeroes.merge(df_domdom2, left_on = 'site_id', right_on = 'domain_id', how = 'left')
zeroes.drop('domain_id',inplace=True,axis=1)
zeroes.columns = ['site_id2' if x=='domainTwoPart_id' else x for x in zeroes.columns]
zeroes=zeroes.merge(df_domdom2, left_on = 'respDom_id', right_on = 'domain_id', how = 'left')
zeroes.drop('domain_id',inplace=True,axis=1)
zeroes.columns = ['respDom_id2' if x=='domainTwoPart_id' else x for x in zeroes.columns]
query = 'SELECT * FROM DomainsTwoPart'
df_dom2 = pd.read_sql_query(query,conn)
zeroes=zeroes.merge(df_dom2, left_on = 'site_id2', right_on = 'id', how = 'left')
zeroes.drop('id',inplace=True,axis=1)
zeroes.columns = ['site_domain2' if x=='domainTwoPart' else x for x in zeroes.columns]
zeroes=zeroes.merge(df_dom2, left_on = 'respDom_id2', right_on = 'id', how = 'left')
zeroes.drop('id',inplace=True,axis=1)
zeroes.columns = ['respDom_domain2' if x=='domainTwoPart' else x for x in zeroes.columns]
query = 'SELECT * FROM Domain2Company'
df_dom2com = pd.read_sql_query(query,conn)
zeroes=zeroes.merge(df_dom2com, left_on = 'respDom_id2', right_on = 'domainTwoPart_id', how = 'left')
zeroes.drop('domainTwoPart_id',inplace=True,axis=1)
query = 'SELECT * FROM Companies'
df_com = pd.read_sql_query(query,conn)
zeroes=zeroes.merge(df_com, left_on = 'company_id', right_on = 'id', how = 'left')
zeroes.drop('id',inplace=True,axis=1)
### domains all
# first-party domains 2 (without subdomain)
(zeroes['site_id2']==zeroes['respDom_id2']).sum()/float(zeroes.shape[0])*100
# 43%
# third-party domains 2 (without subdomain)
(zeroes['site_id2']!=zeroes['respDom_id2']).sum()/float(zeroes.shape[0])*100
# 56%
#zeroes['domain2']=zeroes['domain'].map(twoPart_domain)
### third-party domains
# zeroes_: only rows served from a third-party registrable domain.
zeroes_ = zeroes.loc[zeroes['site_id2']!=zeroes['respDom_id2']]
domains = zeroes_['respDom_domain2'].value_counts()
total = zeroes_.shape[0]
domains_cum = domains.cumsum()
dom_perc = domains/float(total)
dom_perc_cum = dom_perc.cumsum()
# cdf of number of zero images per third-party domains
(x,y) = ecdf_for_plot(domains)
plt.figure()
plt.step(x,y)
plt.ylabel('cdf')
plt.xlabel('no of zero images per domain')
plt.grid(True)
plt.xscale('log')
plt.savefig(os.path.join(fig_dir,'zeroes_third-domains_cdf.png'))
plt.show()
# cumulative percentages per domain rank
fig = plt.figure()
plt.plot(range(1,dom_perc_cum.size+1),dom_perc_cum*100,marker='.')
plt.xscale('log')
plt.title('Cumulative Percentage Counts')
plt.xlabel('domain rank')
plt.ylabel('percentage of zero images')
plt.grid(True)
fig.savefig(fig_dir + 'zeroes_third-domain2_perc_cum.png',format='png')
plt.show()
# cumulative percentages per number of zero images coming from a domain
domains_counts = domains.value_counts().sort_index()
domains_counts_cum = domains_counts.multiply(domains_counts.index).cumsum()
domains_perc_cum=domains_counts_cum/float(domains_counts_cum.iloc[-1])
fig = plt.figure()
plt.plot(domains_perc_cum.index,domains_perc_cum*100,marker='.')
plt.xscale('log')
plt.title('Cumulative Percentages')
plt.xlabel('number of zero images coming from a domain')
plt.ylabel('percentage of zero images')
plt.grid(True)
fig.savefig(fig_dir + 'zeroes_third-domain2No_perc_cum.png',format='png')
plt.show()
# third-party domain ranks and corresponding percentages of zero images
fig = plt.figure()
plt.plot(range(1,domains.size+1),dom_perc*100,marker='.')
plt.xscale('log')
plt.xlabel('domain rank')
plt.ylabel('percentage of zero images')
plt.xlim([1,domains.size+1])
plt.grid(True)
fig.savefig(fig_dir + 'zeroes_third-domain2_perc.png',format='png')
plt.show()
# top 30 third-party domains - percentages
n=30
x=np.arange(0.5,n)
fig = plt.figure()
plt.bar(x,dom_perc[0:n]*100,align='center')
plt.xlabel('domains')
plt.ylabel('percentage of zero images')
labels = list(domains.index[0:n])
plt.xticks(x, labels, rotation=80)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'zeroes_third-domain2_perc_top30.png',format='png')
plt.show()
### table domains - companies
# Per (domain, company) pair: percentage of third-party zero images,
# written out as rows of a LaTeX table.
domcom = zeroes_[['respDom_domain2','company']].groupby(['respDom_domain2','company']).size().reset_index(name='img_perc').sort_values('img_perc',ascending=False)
domcom['img_perc']=domcom['img_perc']/float(zeroes_.shape[0])*100
table_dir = '/home/nsarafij/project/OpenWPM/analysis/tables_10k'
fhand = open(os.path.join(table_dir,'third-domain2company_zero_perc_top30.txt'),'w+')
for i in range(0,n):
    dom = domcom.iloc[i,0]
    comp = domcom.iloc[i,1]
    perc = domcom.iloc[i,2]
    s = str(i+1) + ' & ' + dom + ' & ' + comp + ' & ' + '%.2f' % perc + '\\\\ \\hline'
    #print s
    s = s.encode('UTF-8')
    print s
    fhand.write(s + '\n')
fhand.close()
### companies
companies = zeroes_['company'].value_counts()
total = zeroes_.shape[0]
companies_cum = companies.cumsum()
com_perc = companies/float(total)
com_perc_cum = com_perc.cumsum()
companies.size #854
# third-party company ranks and corresponding percentages of zero images
fig = plt.figure()
plt.plot(range(1,companies.size+1),com_perc*100,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('percentage of zero images')
plt.xlim([1,companies.size+1])
plt.grid(True)
fig.savefig(fig_dir + 'third-company_zero_perc.png',format='png')
plt.show()
#cumulative
fig = plt.figure()
plt.plot(range(1,com_perc_cum.size+1),com_perc_cum*100,marker='.')
plt.xscale('log')
#plt.title('Cumulative Percentages')
plt.xlabel('domain rank')
plt.ylabel('percentage of zero images')
plt.grid(True)
fig.savefig(fig_dir + 'third-company_zero_perc_cum.png',format='png')
plt.show()
# top 30 third-party companies - percentages
n=30
x=np.arange(0.5,n)
fig = plt.figure()
plt.bar(x,com_perc[0:n]*100,align='center')
plt.xlabel('companies')
plt.ylabel('percentage of zero images')
labels = list(companies.index[0:n])
plt.xticks(x, labels, rotation=90)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-company_zero_perc_top30.png',format='png')
plt.show()
### table: top 30 companies
table_dir = '/home/nsarafij/project/OpenWPM/analysis/tables_10k'
fhand = open(os.path.join(table_dir,'third-company_zero_perc_top30.txt'),'w+')
for i in range(0,n):
    com = com_perc.index[i]
    perc = com_perc[i]*100
    s = str(i+1) + ' & ' + com + ' & ' + '%.3f' % perc + '\\\\ \\hline'
    print s
    s = s.encode('UTF-8')
    print s
    fhand.write(s + '\n')
fhand.close()
# table in latex
# None company
zeroes_.loc[zeroes_['company_id']==4].shape[0]/float(zeroes_.shape[0])
# 0.03896522119492128
### put this into results as well
zeroes_['company'].isnull().sum()/float(zeroes_.shape[0])
# 0.00049804140653672828
### type of zero images
zeroes.loc[zeroes['type']==zeroes['cont_type']]
(zeroes['type']==zeroes['cont_type']).sum()
# 0
zeroes.groupby('type').size()
zeroes.groupby('cont_type').size()
zeroes
| gpl-3.0 |
carolFrohlich/nipype | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 10 | 7922 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from builtins import str, bytes
import re
import inspect
import textwrap
import pydoc
import sphinx
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
    # Renders a parsed NumPy-style docstring as Sphinx/reST markup.
    # Each _str_* method returns a list of output lines; __str__ joins them.
    def __init__(self, docstring, config={}):
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)
    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # symbol is unused here; headers are emitted as rubric directives.
        return ['.. rubric:: ' + name, '']
    def _str_field_list(self, name):
        return [':' + name + ':']
    def _str_indent(self, doc, indent=4):
        # Prefix every line of doc with `indent` spaces.
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out
    def _str_signature(self):
        # NOTE(review): signature output is deliberately disabled — the code
        # after the unconditional return is dead. Confirm before removing.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']
    def _str_summary(self):
        return self['Summary'] + ['']
    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']
    def _str_param_list(self, name):
        # Render a parameter-style section (Parameters/Returns/...) as a
        # reST field list with indented descriptions.
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc, 8)
                out += ['']
        return out
    @property
    def _obj(self):
        # The documented object: a class (_cls) or function (_f), if known.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None
    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')
            if prefix:
                prefix = '~%s.' % prefix
            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Members that exist on the object go into an autosummary
                # table; unknown names fall back to a literal reST table.
                if not self._obj or hasattr(self._obj, param):
                    autosum += ["   %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))
            if autosum:
                out += ['.. autosummary::', '   :toctree:', '']
                out += autosum
            if others:
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "=" * maxlen_0 + "  " + "=" * maxlen_1 + "  " + "=" * 10
                fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out
    def _str_section(self, name):
        # Render a free-text section (e.g. Notes) under a rubric header.
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out
    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # Skip the base class's "See Also" header (first two lines).
            out += self._str_indent(see_also[2:])
        return out
    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out
    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out
        out += ['.. index:: %s' % idx.get('default', '')]
        for section, references in list(idx.items()):
            if section == 'default':
                continue
            elif section == 'refguide':
                out += ['   single: %s' % (', '.join(references))]
            else:
                out += ['   %s: %s' % (section, ','.join(references))]
        return out
    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], (str, bytes)):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            # NOTE(review): lexicographic version compare — breaks for
            # hypothetical versions like "0.10"; confirm minimum Sphinx.
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out
    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])
        # When plotting is enabled and the example imports matplotlib but
        # has no explicit plot:: directive, wrap it in one.
        if (self.use_plots and 'import matplotlib' in examples_str and
                'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')
    def __str__(self, indent=0, func_role="obj"):
        # Assemble all sections in canonical order into one reST document.
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Other Parameters',
                           'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    # Function documenter: FunctionDoc parsing with Sphinx-flavoured output.
    def __init__(self, obj, doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    # Class documenter: ClassDoc parsing with Sphinx-flavoured output.
    def __init__(self, obj, doc=None, func_doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): the func_doc argument is accepted but ClassDoc is
        # always given func_doc=None — confirm this is intentional.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    # Fallback documenter for plain objects; wraps a pre-fetched docstring.
    def __init__(self, obj, doc=None, config={}):
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
    """Return the appropriate Sphinx*Doc wrapper for `obj`.

    When `what` is not given it is inferred from the object itself
    (class / module / function / plain object).
    """
    if what is None:
        # Order matters: classes are callable, so test isclass first.
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    if what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    # Modules and everything else fall through to the generic wrapper.
    if doc is None:
        doc = pydoc.getdoc(obj)
    return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
AlexRBaker/ScaffoldM | pwd/lib/python2.7/site-packages/scaffoldm/templateClass.py | 2 | 4971 | #!/usr/bin/env python
###############################################################################
# #
# scaffoldm.py #
# #
# Description!! #
# #
# Copyright (C) Michael Imelfort #
# #
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "Michael Imelfort"
__copyright__ = "Copyright 2014"
__credits__ = ["Michael Imelfort"]
__license__ = "GPLv3"
__maintainer__ = "Michael Imelfort"
__email__ = "mike@mikeimelfort.com"
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# system includes
# local includes
###############################################################################
###############################################################################
###############################################################################
###############################################################################
class TemplateClass():
"""Utilities wrapper"""
def __init__(self): pass
def sayHi(self):
print('write some "REAL" code you bum!')
def runCommand(self, cmd):
"""Run a command and take care of stdout
expects 'cmd' to be a string like "foo -b ar"
returns (stdout, stderr)
"""
from multiprocessing import Pool
from subprocess import Popen, PIPE
p = Popen(cmd.split(' '), stdout=PIPE)
return p.communicate()
def parseFile(self, filename):
"""parse a file"""
import sys
try:
with open(filename, "r") as fh:
for line in fh:
print line
except:
print "Error opening file:", filename, sys.exc_info()[0]
raise
def plot3DFigure(self, points, fileName=""):
"""make a 3d plot"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from pylab import plot,subplot,axis,stem,show,figure
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0],
points[:,1],
points[:,2],
#edgecolors='none',
#c=colors,
#s=2,
#marker='.'
)
# show figure
if fileName == "":
plt.show()
else:
# or save figure
plt.savefig(filename)#,dpi=300,format='png')
plt.close(fig)
del fig
def plot2DFigure(self, points, fileName=""):
"""make a 2d plot"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from pylab import plot,subplot,axis,stem,show,figure
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(points[:,0],
points[:,1],
'*g')
# show figure
if fileName == "":
plt.show()
else:
# or save figure
plt.savefig(filename)#,dpi=300,format='png')
plt.close(fig)
del fig
| gpl-3.0 |
datacommonsorg/data | scripts/biomedical/pharmgkb/drug_gene_relations/pharm.py | 1 | 23854 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate instance mcf from PharmGKB files related to drug-gene associations.
The PharmGKB files (drugs.tsv, chemicals.tsv, genes.tsv, relationships.tsv) are
automatically downloaded and read in as pandas dataframes. Entities from
drugs.tsv and chemicals.tsv are both referred to as drugs, unless otherwise
indicated throughout this file. Data Commons Drug dcids are generated by using
the ./conversion files to map PharmGKB ID, PubChem Compound ID, or InChI to
ChEMBL IDs. Gene dcids are created by prepending the gene symbol with
'bio/hg19' and 'bio/hg38' followed by a '_' separator.
Usage:
$python3 pharm.py
"""
from sys import path
import zipfile
import io
import requests
import pandas as pd
import config
path.insert(1, '../../../')
# from Data Commons util folder
from util import mcf_template_filler
def download_pharmgkb_datasets():
    """Fetches the four PharmGKB zip archives and unpacks each one.

    Each archive (chemicals, drugs, genes, relationships) is downloaded
    from api.pharmgkb.org and extracted into ./raw_data/<name>/ . Every
    zip contains the data tsv plus README/License/Version files.
    """
    base_url = 'https://api.pharmgkb.org/v1/download/file/data/%s.zip'
    for name in ('chemicals', 'drugs', 'genes', 'relationships'):
        response = requests.get(base_url % name)
        archive = zipfile.ZipFile(io.BytesIO(response.content))
        archive.extractall('./raw_data/' + name)
def merge_chembls(chembl1, chembl2, chembl3):
    """Pick the first usable ChEMBL ID among the three candidates.

    Priority order: PharmGKB-derived ID, then PubChem Compound ID-derived,
    then InChI-derived. A candidate is usable when it is neither NaN/None
    nor empty.

    Returns:
        The highest-priority usable ChEMBL ID, or None when none qualify.
    """
    for candidate in (chembl1, chembl2, chembl3):
        if not pd.isnull(candidate) and candidate:
            return candidate
    return None
def append_chembls(combined_df):
    """Returns a copy of `combined_df` with 'ChEMBL ID' and 'InChI Key' added.

    Three candidate ChEMBL columns are looked up from the ./conversion csv
    files — 'Chembl1' keyed on the PharmGKB accession id, 'Chembl2' on the
    PubChem Compound ID, and 'Chembl3' on the InChI string — and merged
    into a single 'ChEMBL ID' column via merge_chembls(). The helper
    columns remain on the returned frame, matching the original behavior.
    """
    drugs_df = combined_df.copy(deep=True)

    def lookup_column(csv_path, key_col, source_col):
        # Build a key -> ChEMBL ID dict from a conversion csv and map the
        # given drugs_df column through it ('' when the key is unknown).
        conv_df = pd.read_csv(csv_path)
        mapping = pd.Series(conv_df['ChEMBL ID'].values,
                            index=conv_df[key_col]).to_dict()
        return [mapping.get(key, '') for key in drugs_df[source_col]]

    drugs_df['Chembl1'] = lookup_column(
        './conversion/pharm_id_to_chembl_combined.csv',
        'PharmGKB ID', 'PharmGKB Accession Id')
    drugs_df['Chembl2'] = lookup_column(
        './conversion/pubchem_id_to_chembl_combined.csv',
        'PubChem ID', 'PubChem Compound Identifiers')

    # The InChI mapping spans two csvs whose rows are aligned by position:
    # InChI (and InChI Key) from one file, ChEMBL ID from the other.
    inchi_df = pd.read_csv('./conversion/inchi_to_inchi_key_combined.csv')
    chembl_col = pd.read_csv(
        './conversion/inchi_key_to_chembl_combined.csv')['ChEMBL ID']
    chembl_by_inchi = pd.Series(chembl_col.values,
                                index=inchi_df['InChI']).to_dict()
    drugs_df['Chembl3'] = [chembl_by_inchi.get(inchi, '')
                           for inchi in drugs_df['InChI']]
    drugs_df['InChI Key'] = inchi_df['InChI Key']

    drugs_df['ChEMBL ID'] = drugs_df.apply(
        lambda row: merge_chembls(row['Chembl1'], row['Chembl2'],
                                  row['Chembl3']),
        axis=1)
    return drugs_df
def get_drugs_df():
    """Loads drugs.tsv and chemicals.tsv into one de-duplicated dataframe.

    Returns:
        Dataframe of the combined rows, enriched with 'ChEMBL ID' and
        'InChI Key' columns by append_chembls(), with NaNs replaced by ''.
    """
    chemicals_df = pd.read_csv('./raw_data/chemicals/chemicals.tsv', sep='\t')
    drugs_only_df = pd.read_csv('./raw_data/drugs/drugs.tsv', sep='\t')
    combined_df = drugs_only_df.append(chemicals_df,
                                       ignore_index=True).drop_duplicates()
    enriched_df = append_chembls(combined_df)
    enriched_df.fillna('', inplace=True)
    return enriched_df
def format_text_list(text_list):
    """Formats a comma separated list into an MCF text-value list.

    Items in the input may or may not be enclosed in double quotes; commas
    inside quoted items are not treated as separators. Every item in the
    output is stripped and double-quoted.

    Args:
        text_list: a single string representing a comma separated list.

    Returns:
        A string of double-quoted, comma separated values, e.g.
        'a, "b, c"' -> '"a","b, c"'.
    """
    if not text_list:
        return ''
    items = []
    current = []
    inside_quotes = False
    for char in text_list:
        if char == '"':
            # Quotes only toggle state; they are dropped from the value.
            inside_quotes = not inside_quotes
        elif char == ',' and not inside_quotes:
            # Top-level comma: close out the current item.
            items.append(''.join(current).strip())
            current = []
        else:
            current.append(char)
    items.append(''.join(current).strip())
    return ','.join('"' + item + '"' for item in items)
def format_semicolon_list(semi_list):
    """Formats a semicolon separated list into an MCF text-value list.

    Used to format 'PMIDs' in relationships.tsv.

    Args:
        semi_list: a string representing a semicolon separated list.

    Returns:
        A string of double-quoted, comma separated values.
    """
    if not semi_list:
        return ''
    # str.join avoids the quadratic manual concatenate-then-strip pattern.
    return ','.join('"' + value + '"' for value in semi_list.split(';'))
def format_bool(text, true_val):
    """Returns 'True' when `text` equals `true_val`, else ''.

    Used for the 'PK'/'PD' columns of relationships.tsv: a matching marker
    means the property is True, while absence means no data (not False).
    'True' is returned as a string because the output feeds a template
    filler, and '' tells the filler to drop the missing property.
    """
    return 'True' if text == true_val else ''
def get_enum(key_list, enum_dict):
    """Returns the MCF list of enum dcids for the given mapping values.

    Args:
        key_list: a string representing a comma-separated list of enum
            mapping values; each item must be a key of enum_dict.
        enum_dict: value -> enum dcid mapping.

    Returns:
        A comma separated list of 'dcid:'-prefixed enum dcids.

    Raises:
        KeyError: if an item of key_list is not a key of enum_dict.
    """
    if not key_list:
        return ''
    # str.join avoids the quadratic manual concatenate-then-strip pattern.
    return ','.join('dcid:' + enum_dict[key] for key in key_list.split(','))
def get_xref_mcf(xrefs, xref_to_label):
    """Returns MCF property lines for a list of cross references.

    Args:
        xrefs: a string representing a comma-separated list of quoted
            '<source>:<id>' cross references.
        xref_to_label: xref source name -> Data Commons property label.

    Returns:
        A multiline string of '<label>: "<value>"' lines (one per xref,
        each newline-terminated). Unrecognized sources are reported and
        skipped.
    """
    if not xrefs:
        return ''
    lines = []
    for raw_xref in xrefs.split(','):
        parts = raw_xref.replace('"', '').strip().split(':')
        if parts[0] not in xref_to_label:
            print('unexpected format in gene xrefs:' + xrefs)
            continue
        # Re-join the remainder in case the identifier itself contains ':'.
        lines.append(xref_to_label[parts[0]] + ': "'
                     + ':'.join(parts[1:]).strip() + '"\n')
    return ''.join(lines)
def get_gene_dcids(symbol):
    """Return the dcids of the two genes created from a gene symbol.

    Every symbol maps to two Data Commons nodes, one per genome assembly
    (hg19 and hg38).

    Args:
      symbol: the gene symbol, possibly empty

    Returns:
      A list of the two gene dcids, or an empty list when no symbol is given.
      (Returning a list instead of '' keeps the return type consistent;
      callers only iterate the result, so both are handled identically.)
    """
    if symbol:
        return ['bio/hg19_' + symbol, 'bio/hg38_' + symbol]
    return []
def get_drug_dcid(row):
    """Return the dcid of a drug row.

    Prefers the drug's ChEMBL ID; when that was not found, a new dcid is
    minted from the PharmGKB accession id instead.
    """
    chembl_id = row['ChEMBL ID']
    identifier = chembl_id if chembl_id else row['PharmGKB Accession Id']
    return 'bio/' + identifier
def get_compound_type(compound_types):
    """Return the mcf value of the typeOf property for a compound.

    Applied to the 'Type' entry of each drugs_df row.

    Args:
      compound_types: string of comma-separated type values

    Returns:
      'dcid:Drug' when every type is drug-like, 'dcid:ChemicalCompound' when
      none is, and 'dcs:ChemicalCompound,dcs:Drug' when the list mixes both.
    """
    drug_like = ('Drug', 'Drug Class', 'Prodrug')
    seen = set()
    for raw_type in compound_types.split(','):
        cleaned = raw_type.replace('"', '').strip()
        seen.add('dcid:Drug' if cleaned in drug_like else 'dcid:ChemicalCompound')
    if len(seen) == 2:
        # Mixed drug / non-drug list.
        return 'dcs:ChemicalCompound,dcs:Drug'
    return seen.pop()
def get_gene_mcf(row, gene_dcid):
    """Return the mcf of a gene node given its dcid and a genes_df row.

    Fills config.GENE_TEMPLATE via mcf_template_filler with the row's values,
    formatting list-valued columns with format_text_list and appending the
    cross-reference properties from get_xref_mcf.

    Args:
      row: a row from genes_df
      gene_dcid: dcid of the gene mcf node to be created

    Returns:
      An mcf-formatted string of the gene node.
    """
    filler = mcf_template_filler.Filler(config.GENE_TEMPLATE,
                                        required_vars=['dcid'])
    values = {
        'dcid': gene_dcid,
        'name': row['Name'],
        'symbol': row['Symbol'],
        'pharm_id': row['PharmGKB Accession Id'],
        'ncbi_ids': format_text_list(row['NCBI Gene ID']),
        'hgnc_ids': format_text_list(row['HGNC ID']),
        'ensembl_ids': format_text_list(row['Ensembl Id']),
        'alt_symbols': format_text_list(row['Alternate Symbols']),
    }
    # Omit empty values so the template drops those properties entirely.
    values = {key: value for key, value in values.items() if value}
    gene_mcf = filler.fill(values)
    gene_mcf += get_xref_mcf(row['Cross-references'],
                             config.GENE_XREF_PROP_DICT)
    return gene_mcf
def write_gene_row(mcf_file, row, pharm_to_dcid):
    """Write the mcf nodes for one genes_df row to file.

    Each row of genes_df yields two Data Commons gene nodes, because every
    gene symbol maps to two dcids (one per assembly). The PharmGKB id to dcid
    mapping is recorded in pharm_to_dcid for later use when parsing
    relationships.tsv.

    Args:
      mcf_file: output mcf file object
      row: row from the genes_df pandas DataFrame
      pharm_to_dcid: PharmGKB id to gene-dcids dictionary mapping
    """
    dcids = get_gene_dcids(row['Symbol'])
    pharm_to_dcid[row['PharmGKB Accession Id']] = dcids
    for dcid in dcids:
        mcf_file.write(get_gene_mcf(row, dcid))
def get_drug_mcf(row, drug_dcid):
    """Return the mcf of a drug node given its dcid and a drugs_df row.

    Fills config.DRUG_TEMPLATE via mcf_template_filler with the row's values,
    using format_text_list and get_compound_type to format the data, and
    appends the cross-reference properties from get_xref_mcf.

    Args:
      row: a row from drugs_df
      drug_dcid: dcid of the drug mcf node to be created

    Returns:
      An mcf-formatted string of the drug node.
    """
    filler = mcf_template_filler.Filler(config.DRUG_TEMPLATE,
                                        required_vars=['dcid', 'type'])
    values = {
        'dcid': drug_dcid,
        'type': get_compound_type(row['Type']),
        # The DC node name is the dcid without its 'bio/' namespace prefix.
        'dc_name': drug_dcid.replace('bio/', ''),
        'name': row['Name'],
        'trade_names': format_text_list(row['Trade Names']),
        'smiles': row['SMILES'],
        'inchi': row['InChI'],
        'inchi_key': row['InChI Key'],
        'pharm_id': row['PharmGKB Accession Id'],
        'rx_ids': format_text_list(row['RxNorm Identifiers']),
        'atc_ids': format_text_list(row['ATC Identifiers']),
        'pubchem_compound_ids': format_text_list(
            row['PubChem Compound Identifiers']),
    }
    # Omit empty values so the template drops those properties entirely.
    values = {key: value for key, value in values.items() if value}
    drug_mcf = filler.fill(values)
    drug_mcf += get_xref_mcf(row['Cross-references'],
                             config.DRUG_XREF_PROP_DICT)
    return drug_mcf
def write_drug_row(mcf_file, row, pharm_to_dcid):
    """Write the mcf node for one drugs_df row to file.

    Resolves the drug's dcid, records the PharmGKB id to dcid mapping in
    pharm_to_dcid (used later when parsing relationships.tsv), then writes the
    drug's mcf string to the output file.

    Args:
      mcf_file: output mcf file object
      row: row from the drugs_df pandas DataFrame
      pharm_to_dcid: PharmGKB id to drug-dcid dictionary mapping
    """
    dcid = get_drug_dcid(row)
    pharm_to_dcid[row['PharmGKB Accession Id']] = dcid
    mcf_file.write(get_drug_mcf(row, dcid))
def get_relation_mcf(row, drug_dcid, gene_dcid):
    """Return the mcf of a ChemicalCompoundGeneAssociation node.

    Fills config.RELATION_TEMPLATE via mcf_template_filler from the dcids of
    the drug and gene involved plus the relationships.tsv-derived row values,
    using format_semicolon_list, format_bool, and get_enum to format the data.

    Args:
      row: a row from either drug_gene_df or gene_drug_df
      drug_dcid: dcid of the drug node involved in the association
      gene_dcid: dcid of the gene node involved in the association

    Returns:
      An mcf-formatted string of the ChemicalCompoundGeneAssociation node.
    """
    drug_ref = drug_dcid.replace('bio/', '')
    gene_ref = gene_dcid.replace('bio/', '')
    assoc_name = 'CGA_' + drug_ref + '_' + gene_ref
    filler = mcf_template_filler.Filler(
        config.RELATION_TEMPLATE,
        required_vars=['dcid', 'gene_dcid', 'drug_dcid'])
    values = {
        'dcid': 'bio/' + assoc_name,
        'name': assoc_name,
        'gene_dcid': gene_dcid,
        'drug_dcid': drug_dcid,
        'pubmed_ids': format_semicolon_list(row['PMIDs']),
        'pk_bool': format_bool(row['PK'], 'PK'),
        'pd_bool': format_bool(row['PD'], 'PD'),
        'assoc_enums': get_enum(row['Association'],
                                config.ASSOCIATION_ENUM_DICT),
        'evid_enums': get_enum(row['Evidence'], config.EVIDENCE_ENUM_DICT),
    }
    # Omit empty values so the template drops those properties entirely.
    values = {key: value for key, value in values.items() if value}
    return filler.fill(values)
def write_relation_row(mcf_file, row, drug_is_first, genes_pharm_to_dcid,
                       drugs_pharm_to_dcid):
    """Write the association mcf for one drug-gene relationship row to file.

    Resolves the drug dcid and the (two) gene dcids from the saved PharmGKB id
    mappings, then writes one ChemicalCompoundGeneAssociation node per gene
    dcid. Rows whose PharmGKB ids are unknown are reported and skipped.

    Args:
      mcf_file: output mcf file object
      row: a row from either drug_gene_df or gene_drug_df
      drug_is_first: True when the drug's PharmGKB id is in 'Entity1_id',
        False when it is in 'Entity2_id'
      genes_pharm_to_dcid: PharmGKB id to gene-dcids dictionary mapping
      drugs_pharm_to_dcid: PharmGKB id to drug-dcid dictionary mapping
    """
    if drug_is_first:
        drug_pharm, gene_pharm = row['Entity1_id'], row['Entity2_id']
    else:
        drug_pharm, gene_pharm = row['Entity2_id'], row['Entity1_id']
    if drug_pharm not in drugs_pharm_to_dcid:
        print('unrecognized drug pharm id: ' + drug_pharm)
        return
    if gene_pharm not in genes_pharm_to_dcid:
        print('unrecognized gene pharm id: ' + gene_pharm)
        return
    drug_dcid = drugs_pharm_to_dcid[drug_pharm]
    for gene_dcid in genes_pharm_to_dcid[gene_pharm]:
        mcf_file.write(get_relation_mcf(row, drug_dcid, gene_dcid))
def main():
    """Generates ./pharmgkb.mcf

    Downloads the related PharmGKB dataset files and creates pandas
    DataFrames from each of the .tsv files:
      drugs_df - combined data from chemicals.tsv and drugs.tsv
      genes_df - data from genes.tsv
      relation_df - data from relationships.tsv
    Parses the drug and gene frames first, writing their nodes and saving
    their PharmGKB id to dcid mappings. Those mappings are then used while
    parsing relation_df, which is filtered into two frames:
      drug_gene_df - 'Entity1_type' is 'Chemical' and 'Entity2_type' is 'Gene'
      gene_drug_df - 'Entity1_type' is 'Gene' and 'Entity2_type' is 'Chemical'
    This makes the drug-gene association rows easy to parse since the types
    and their order are established.
    """
    download_pharmgkb_datasets()
    # 'with' guarantees the output file is flushed and closed even if any
    # parsing step raises (the original open()/close() pair leaked the handle
    # on error).
    with open('pharmgkb.mcf', 'w') as mcf_file:
        # read genes.tsv into a dataframe called genes_df
        genes_pharm_to_dcid = {}
        genes_df = pd.read_csv('./raw_data/genes/genes.tsv', sep='\t')
        genes_df.fillna('', inplace=True)
        # convert each row of genes_df to mcf format and write to file
        print('writing gene nodes to mcf....')
        genes_df.apply(
            lambda row: write_gene_row(mcf_file, row, genes_pharm_to_dcid),
            axis=1)
        # use helper function to read chemicals.tsv and drugs.tsv into dataframe
        drugs_pharm_to_dcid = {}
        drugs_df = get_drugs_df()
        # convert each row of drugs_df to mcf format and write to file
        print('writing drug nodes to mcf....')
        drugs_df.apply(
            lambda row: write_drug_row(mcf_file, row, drugs_pharm_to_dcid),
            axis=1)
        # read relationships.tsv into a dataframe called relation_df
        relation_df = pd.read_csv('./raw_data/relationships/relationships.tsv',
                                  sep='\t')
        relation_df.fillna('', inplace=True)
        print('writing ChemicalCompoundGeneAssociation nodes to mcf....')
        # rows where Entity1 is a Chemical(Drug) and Entity2 is a Gene
        drug_gene_df = relation_df[(relation_df['Entity1_type'] == 'Chemical') &
                                   (relation_df['Entity2_type'] == 'Gene')]
        drug_first = True  # indicates that Entity1 is a Drug
        drug_gene_df.apply(lambda row: write_relation_row(
            mcf_file, row, drug_first, genes_pharm_to_dcid,
            drugs_pharm_to_dcid),
                           axis=1)
        # rows where Entity1 is a Gene and Entity2 is a Chemical(Drug)
        gene_drug_df = relation_df[(relation_df['Entity1_type'] == 'Gene') &
                                   (relation_df['Entity2_type'] == 'Chemical')]
        drug_first = False  # indicates that Entity1 is a Gene
        gene_drug_df.apply(lambda row: write_relation_row(
            mcf_file, row, drug_first, genes_pharm_to_dcid,
            drugs_pharm_to_dcid),
                           axis=1)
if __name__ == '__main__':
    main()
| apache-2.0 |
JosmanPS/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimator to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30]  # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
# Slice bounds must be Python ints: np.ceil/np.floor return floats, and
# non-integer indices raise a TypeError with modern NumPy (>= 1.12).
upper_end = int(np.ceil(0.5 * n_pixels))
lower_start = int(np.floor(0.5 * n_pixels))
X_train = train[:, :upper_end]   # Upper half of the faces
y_train = train[:, lower_start:]  # Lower half of the faces
X_test = test[:, :upper_end]
y_test = test[:, lower_start:]
# Fit estimators
ESTIMATORS = {
    "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
                                       random_state=0),
    "K-nn": KNeighborsRegressor(),
    "Linear regression": LinearRegression(),
    "Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
    estimator.fit(X_train, y_train)
    y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
    true_face = np.hstack((X_test[i], y_test[i]))
    if i:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
    else:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
                          title="true faces")
    sub.axis("off")
    sub.imshow(true_face.reshape(image_shape),
               cmap=plt.cm.gray,
               interpolation="nearest")
    for j, est in enumerate(sorted(ESTIMATORS)):
        completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
        if i:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
        else:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
                              title=est)
        sub.axis("off")
        sub.imshow(completed_face.reshape(image_shape),
                   cmap=plt.cm.gray,
                   interpolation="nearest")
plt.show()
| bsd-3-clause |
ishank08/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data: a noisy 2-D spiral of n_samples points.
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
# One figure per (connectivity, n_clusters) pair; within each figure, one
# subplot per linkage strategy so their behavior can be compared directly.
for connectivity in (None, knn_graph):
    for n_clusters in (30, 3):
        plt.figure(figsize=(10, 4))
        for index, linkage in enumerate(('average', 'complete', 'ward')):
            plt.subplot(1, 3, index + 1)
            model = AgglomerativeClustering(linkage=linkage,
                                            connectivity=connectivity,
                                            n_clusters=n_clusters)
            # Time the fit so the speed-up from the connectivity graph is
            # visible in each subplot title.
            t0 = time.time()
            model.fit(X)
            elapsed_time = time.time() - t0
            # NOTE(review): plt.cm.spectral was removed in matplotlib >= 2.2;
            # newer environments need 'nipy_spectral' — confirm the pinned
            # matplotlib version before upgrading.
            plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
                        cmap=plt.cm.spectral)
            plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
                      fontdict=dict(verticalalignment='top'))
            plt.axis('equal')
            plt.axis('off')
            plt.subplots_adjust(bottom=0, top=.89, wspace=0,
                                left=0, right=1)
            plt.suptitle('n_cluster=%i, connectivity=%r' %
                         (n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
pgdr/ert | python/python/ert_gui/plottery/plots/observations.py | 3 | 2027 | import math
def plotObservations(plot_context, axes):
    """Overlay observation data for the current key on the given axes.

    Does nothing unless observations are enabled in the plot config, the data
    gatherer can gather observations, at least one case exists, and the
    gathered data is non-empty. Only the first case is used.
    """
    config = plot_context.plotConfig()
    data_gatherer = plot_context.dataGatherer()
    if not config.isObservationsEnabled():
        return
    if not data_gatherer.hasObservationGatherFunction():
        return
    case_list = plot_context.cases()
    if len(case_list) == 0:
        return
    key = plot_context.key()
    observation_data = data_gatherer.gatherObservationData(
        plot_context.ert(), case_list[0], key)
    if not observation_data.empty:
        _plotObservations(axes, config, observation_data, value_column=key)
def _plotObservations(axes, plot_config, data, value_column):
    """Draw observations as errorbars, always on top (z-order 1000).

    The errorbar artist cannot take a separate linestyle for its error line,
    so a line_style of '' is translated into a zero elinewidth to hide the
    solid error line instead.

    @type axes: matplotlib.axes.Axes
    @type plot_config: PlotConfig
    @type data: DataFrame
    @type value_column: Str
    """
    style = plot_config.observationsStyle()

    def _cap_size(width):
        # Scale the top/bottom caps with the line width/thickness.
        return 0 if width == 0 else math.log(width, 1.2) + 3

    # A line style of '' toggles errorbar visibility off via zero width.
    if style.line_style == '':
        style.width = 0

    axes.errorbar(x=data.index.values, y=data[value_column].values,
                  yerr=data["STD_%s" % value_column].values,
                  fmt=style.line_style, ecolor=style.color, color=style.color,
                  capsize=_cap_size(style.width),
                  capthick=style.width,  # same as width/thickness of error line
                  alpha=style.alpha,
                  linewidth=0,
                  marker=style.marker,
                  ms=style.size,
                  elinewidth=style.width, zorder=1000)
| gpl-3.0 |
brchiu/tensorflow | tensorflow/contrib/distributions/python/ops/mixture.py | 22 | 21121 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class Mixture(distribution.Distribution):
  """Mixture distribution.
  The `Mixture` object implements batched mixture distributions.
  The mixture model is defined by a `Categorical` distribution (the mixture)
  and a python list of `Distribution` objects.
  Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
  `entropy_lower_bound`.
  #### Examples
  ```python
  # Create a mixture of two Gaussians:
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  mix = 0.3
  bimix_gauss = tfd.Mixture(
    cat=tfd.Categorical(probs=[mix, 1.-mix]),
    components=[
      tfd.Normal(loc=-1., scale=0.1),
      tfd.Normal(loc=+1., scale=0.5),
  ])
  # Plot the PDF.
  import matplotlib.pyplot as plt
  x = tf.linspace(-2., 3., int(1e4)).eval()
  plt.plot(x, bimix_gauss.prob(x).eval());
  ```
  """
  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               cat,
               components,
               validate_args=False,
               allow_nan_stats=True,
               use_static_graph=False,
               name="Mixture"):
    """Initialize a Mixture distribution.
    A `Mixture` is defined by a `Categorical` (`cat`, representing the
    mixture probabilities) and a list of `Distribution` objects
    all having matching dtype, batch shape, event shape, and continuity
    properties (the components).
    The `num_classes` of `cat` must be possible to infer at graph construction
    time and match `len(components)`.
    Args:
      cat: A `Categorical` distribution instance, representing the probabilities
        of `distributions`.
      components: A list or tuple of `Distribution` instances.
        Each instance must have the same type, be defined on the same domain,
        and have matching `event_shape` and `batch_shape`.
      validate_args: Python `bool`, default `False`. If `True`, raise a runtime
        error if batch or event ranks are inconsistent between cat and any of
        the distributions. This is only checked if the ranks cannot be
        determined statically at graph construction time.
      allow_nan_stats: Boolean, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      use_static_graph: Calls to `sample` will not rely on dynamic tensor
        indexing, allowing for some static graph compilation optimizations, but
        at the expense of sampling all underlying distributions in the mixture.
        (Possibly useful when running on TPUs).
        Default value: `False` (i.e., use dynamic indexing).
      name: A name for this distribution (optional).
    Raises:
      TypeError: If cat is not a `Categorical`, or `components` is not
        a list or tuple, or the elements of `components` are not
        instances of `Distribution`, or do not have matching `dtype`.
      ValueError: If `components` is an empty list or tuple, or its
        elements do not have a statically known event rank.
        If `cat.num_classes` cannot be inferred at graph creation time,
        or the constant value of `cat.num_classes` is not equal to
        `len(components)`, or all `components` and `cat` do not have
        matching static batch shapes, or all components do not
        have matching static event shapes.
    """
    parameters = dict(locals())
    if not isinstance(cat, categorical.Categorical):
      raise TypeError("cat must be a Categorical distribution, but saw: %s" %
                      cat)
    if not components:
      raise ValueError("components must be a non-empty list or tuple")
    if not isinstance(components, (list, tuple)):
      raise TypeError("components must be a list or tuple, but saw: %s" %
                      components)
    if not all(isinstance(c, distribution.Distribution) for c in components):
      raise TypeError(
          "all entries in components must be Distribution instances"
          " but saw: %s" % components)
    dtype = components[0].dtype
    if not all(d.dtype == dtype for d in components):
      raise TypeError("All components must have the same dtype, but saw "
                      "dtypes: %s" % [(d.name, d.dtype) for d in components])
    static_event_shape = components[0].event_shape
    static_batch_shape = cat.batch_shape
    # Merging raises if any two components disagree on a statically known
    # dimension, enforcing matching shapes at graph-construction time.
    for d in components:
      static_event_shape = static_event_shape.merge_with(d.event_shape)
      static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
    if static_event_shape.ndims is None:
      raise ValueError(
          "Expected to know rank(event_shape) from components, but "
          "none of the components provide a static number of ndims")
    # Ensure that all batch and event ndims are consistent.
    with ops.name_scope(name, values=[cat.logits]) as name:
      num_components = cat.event_size
      static_num_components = tensor_util.constant_value(num_components)
      if static_num_components is None:
        raise ValueError(
            "Could not infer number of classes from cat and unable "
            "to compare this value to the number of components passed in.")
      # Possibly convert from numpy 0-D array.
      static_num_components = int(static_num_components)
      if static_num_components != len(components):
        raise ValueError("cat.num_classes != len(components): %d vs. %d" %
                         (static_num_components, len(components)))
      cat_batch_shape = cat.batch_shape_tensor()
      cat_batch_rank = array_ops.size(cat_batch_shape)
      if validate_args:
        # Runtime checks for shape agreement that could not be established
        # statically; threaded through ops via control_dependencies below.
        batch_shapes = [d.batch_shape_tensor() for d in components]
        batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
        check_message = ("components[%d] batch shape must match cat "
                         "batch shape")
        self._assertions = [
            check_ops.assert_equal(
                cat_batch_rank, batch_ranks[di], message=check_message % di)
            for di in range(len(components))
        ]
        self._assertions += [
            check_ops.assert_equal(
                cat_batch_shape, batch_shapes[di], message=check_message % di)
            for di in range(len(components))
        ]
      else:
        self._assertions = []
      self._cat = cat
      self._components = list(components)
      self._num_components = static_num_components
      self._static_event_shape = static_event_shape
      self._static_batch_shape = static_batch_shape
      self._use_static_graph = use_static_graph
      if use_static_graph and static_num_components is None:
        raise ValueError("Number of categories must be known statically when "
                         "`static_sample=True`.")
    # We let the Mixture distribution access _graph_parents since its arguably
    # more like a baseclass.
    graph_parents = self._cat._graph_parents  # pylint: disable=protected-access
    for c in self._components:
      graph_parents += c._graph_parents  # pylint: disable=protected-access
    super(Mixture, self).__init__(
        dtype=dtype,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=graph_parents,
        name=name)
  @property
  def cat(self):
    """The `Categorical` distribution holding the mixture probabilities."""
    return self._cat
  @property
  def components(self):
    """The list of component `Distribution` instances."""
    return self._components
  @property
  def num_components(self):
    """Static (Python int) number of mixture components."""
    return self._num_components
  def _batch_shape_tensor(self):
    """Dynamic batch shape, delegated to the mixture's `Categorical`."""
    return self._cat.batch_shape_tensor()
  def _batch_shape(self):
    """Static batch shape, merged across cat and all components."""
    return self._static_batch_shape
  def _event_shape_tensor(self):
    """Dynamic event shape, taken from the first component."""
    return self._components[0].event_shape_tensor()
  def _event_shape(self):
    """Static event shape, merged across all components."""
    return self._static_event_shape
  def _expand_to_event_rank(self, x):
    """Expand the rank of x up to static_event_rank times for broadcasting.
    The static event rank was checked to not be None at construction time.
    Args:
      x: A tensor to expand.
    Returns:
      The expanded tensor.
    """
    expanded_x = x
    for _ in range(self.event_shape.ndims):
      expanded_x = array_ops.expand_dims(expanded_x, -1)
    return expanded_x
  def _mean(self):
    """Mixture mean: sum over components of cat_prob_i * component_mean_i."""
    with ops.control_dependencies(self._assertions):
      distribution_means = [d.mean() for d in self.components]
      cat_probs = self._cat_probs(log_probs=False)
      cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
      partial_means = [
          c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
      ]
      # These should all be the same shape by virtue of matching
      # batch_shape and event_shape.
      return math_ops.add_n(partial_means)
  def _stddev(self):
    """Mixture standard deviation, computed per-event via mixture_stddev."""
    with ops.control_dependencies(self._assertions):
      distribution_means = [d.mean() for d in self.components]
      distribution_devs = [d.stddev() for d in self.components]
      cat_probs = self._cat_probs(log_probs=False)
      stacked_means = array_ops.stack(distribution_means, axis=-1)
      stacked_devs = array_ops.stack(distribution_devs, axis=-1)
      cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
      broadcasted_cat_probs = (array_ops.stack(cat_probs, axis=-1) *
                               array_ops.ones_like(stacked_means))
      # Flatten to [batch*event, k] so mixture_stddev can operate rowwise.
      batched_dev = distribution_utils.mixture_stddev(
          array_ops.reshape(broadcasted_cat_probs, [-1, len(self.components)]),
          array_ops.reshape(stacked_means, [-1, len(self.components)]),
          array_ops.reshape(stacked_devs, [-1, len(self.components)]))
      # I.e. re-shape to list(batch_shape) + list(event_shape).
      return array_ops.reshape(batched_dev,
                               array_ops.shape(broadcasted_cat_probs)[:-1])
  def _log_prob(self, x):
    """log p(x) = logsumexp_i(log cat_prob_i + log component_prob_i(x))."""
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_probs = [d.log_prob(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_probs = [
          cat_lp + d_lp
          for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
      ]
      concat_log_probs = array_ops.stack(final_log_probs, 0)
      log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
      return log_sum_exp
  def _log_cdf(self, x):
    """log CDF(x) = logsumexp_i(log cat_prob_i + log component_cdf_i(x))."""
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_cdfs = [
          cat_lp + d_lcdf
          for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
      ]
      concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0)
      mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0])
      return mixture_log_cdf
  def _sample_n(self, n, seed=None):
    """Draw n samples, via static one-hot masking or dynamic partitioning."""
    if self._use_static_graph:
      # This sampling approach is almost the same as the approach used by
      # `MixtureSameFamily`. The differences are due to having a list of
      # `Distribution` objects rather than a single object, and maintaining
      # random seed management that is consistent with the non-static code path.
      samples = []
      cat_samples = self.cat.sample(n, seed=seed)
      for c in range(self.num_components):
        seed = distribution_util.gen_new_seed(seed, "mixture")
        samples.append(self.components[c].sample(n, seed=seed))
      x = array_ops.stack(
          samples, -self._static_event_shape.ndims - 1)     # [n, B, k, E]
      npdt = x.dtype.as_numpy_dtype
      mask = array_ops.one_hot(
          indices=cat_samples,           # [n, B]
          depth=self._num_components,    # == k
          on_value=np.ones([], dtype=npdt),
          off_value=np.zeros([], dtype=npdt))    # [n, B, k]
      mask = distribution_utils.pad_mixture_dimensions(
          mask, self, self._cat,
          self._static_event_shape.ndims)           # [n, B, k, [1]*e]
      return math_ops.reduce_sum(
          x * mask,
          axis=-1 - self._static_event_shape.ndims)  # [n, B, E]
    with ops.control_dependencies(self._assertions):
      n = ops.convert_to_tensor(n, name="n")
      static_n = tensor_util.constant_value(n)
      n = int(static_n) if static_n is not None else n
      cat_samples = self.cat.sample(n, seed=seed)
      static_samples_shape = cat_samples.get_shape()
      if static_samples_shape.is_fully_defined():
        samples_shape = static_samples_shape.as_list()
        samples_size = static_samples_shape.num_elements()
      else:
        samples_shape = array_ops.shape(cat_samples)
        samples_size = array_ops.size(cat_samples)
      static_batch_shape = self.batch_shape
      if static_batch_shape.is_fully_defined():
        batch_shape = static_batch_shape.as_list()
        batch_size = static_batch_shape.num_elements()
      else:
        batch_shape = self.batch_shape_tensor()
        batch_size = math_ops.reduce_prod(batch_shape)
      static_event_shape = self.event_shape
      if static_event_shape.is_fully_defined():
        event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
      else:
        event_shape = self.event_shape_tensor()
      # Get indices into the raw cat sampling tensor. We will
      # need these to stitch sample values back out after sampling
      # within the component partitions.
      samples_raw_indices = array_ops.reshape(
          math_ops.range(0, samples_size), samples_shape)
      # Partition the raw indices so that we can use
      # dynamic_stitch later to reconstruct the samples from the
      # known partitions.
      partitioned_samples_indices = data_flow_ops.dynamic_partition(
          data=samples_raw_indices,
          partitions=cat_samples,
          num_partitions=self.num_components)
      # Copy the batch indices n times, as we will need to know
      # these to pull out the appropriate rows within the
      # component partitions.
      batch_raw_indices = array_ops.reshape(
          array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
      # Explanation of the dynamic partitioning below:
      #   batch indices are i.e., [0, 1, 0, 1, 0, 1]
      # Suppose partitions are:
      #     [1 1 0 0 1 1]
      # After partitioning, batch indices are cut as:
      #     [batch_indices[x] for x in 2, 3]
      #     [batch_indices[x] for x in 0, 1, 4, 5]
      # i.e.
      #     [1 1] and [0 0 0 0]
      # Now we sample n=2 from part 0 and n=4 from part 1.
      # For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
      # and for part 1 we want samples from batch entries 0, 0, 0, 0
      #   (samples 0, 1, 2, 3).
      partitioned_batch_indices = data_flow_ops.dynamic_partition(
          data=batch_raw_indices,
          partitions=cat_samples,
          num_partitions=self.num_components)
      samples_class = [None for _ in range(self.num_components)]
      for c in range(self.num_components):
        n_class = array_ops.size(partitioned_samples_indices[c])
        seed = distribution_util.gen_new_seed(seed, "mixture")
        samples_class_c = self.components[c].sample(n_class, seed=seed)
        # Pull out the correct batch entries from each index.
        # To do this, we may have to flatten the batch shape.
        # For sample s, batch element b of component c, we get the
        # partitioned batch indices from
        # partitioned_batch_indices[c]; and shift each element by
        # the sample index. The final lookup can be thought of as
        # a matrix gather along locations (s, b) in
        # samples_class_c where the n_class rows correspond to
        # samples within this component and the batch_size columns
        # correspond to batch elements within the component.
        #
        # Thus the lookup index is
        #   lookup[c, i] = batch_size * s[i] + b[c, i]
        # for i = 0 ... n_class[c] - 1.
        lookup_partitioned_batch_indices = (
            batch_size * math_ops.range(n_class) +
            partitioned_batch_indices[c])
        samples_class_c = array_ops.reshape(
            samples_class_c,
            array_ops.concat([[n_class * batch_size], event_shape], 0))
        samples_class_c = array_ops.gather(
            samples_class_c, lookup_partitioned_batch_indices,
            name="samples_class_c_gather")
        samples_class[c] = samples_class_c
      # Stitch back together the samples across the components.
      lhs_flat_ret = data_flow_ops.dynamic_stitch(
          indices=partitioned_samples_indices, data=samples_class)
      # Reshape back to proper sample, batch, and event shape.
      ret = array_ops.reshape(lhs_flat_ret,
                              array_ops.concat([samples_shape,
                                                self.event_shape_tensor()], 0))
      ret.set_shape(
          tensor_shape.TensorShape(static_samples_shape).concatenate(
              self.event_shape))
      return ret
  def entropy_lower_bound(self, name="entropy_lower_bound"):
    r"""A lower bound on the entropy of this mixture model.
    The bound below is not always very tight, and its usefulness depends
    on the mixture probabilities and the components in use.
    A lower bound is useful for ELBO when the `Mixture` is the variational
    distribution:
    \\(
    \log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
    \\)
    where \\( p \\) is the prior distribution, \\( q \\) is the variational,
    and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
    \\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
    place of \\( H[q] \\).
    For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
    \\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
    simple lower bound is:
    \\(
    \begin{align}
    H[q] & = - \int q(z) \log q(z) dz \\\
       & = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
       & \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
       & = \sum_i c_i H[q_i]
    \end{align}
    \\)
    This is the term we calculate below for \\( G[q] \\).
    Args:
      name: A name for this operation (optional).
    Returns:
      A lower bound on the Mixture's entropy.
    """
    with self._name_scope(name, values=[self.cat.logits]):
      with ops.control_dependencies(self._assertions):
        distribution_entropies = [d.entropy() for d in self.components]
        cat_probs = self._cat_probs(log_probs=False)
        partial_entropies = [
            c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
        ]
        # These are all the same shape by virtue of matching batch_shape
        return math_ops.add_n(partial_entropies)
  def _cat_probs(self, log_probs):
    """Get a list of num_components batchwise probabilities."""
    which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
    cat_probs = which_softmax(self.cat.logits)
    cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
    return cat_probs
| apache-2.0 |
thomasspiesser/MYpop | MYpop_plots.py | 1 | 13621 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import pylab as p
from numpy import *
from scipy import stats,polyval
import sys
import re
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import rc,cm
import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.patches import PathPatch
################ plot percentage of each generation of whole population ##################
def plot_percentage_gen(array_v,save_name):
d= array_v['field']==0
individuals=float(len([value for position, value in ndenumerate(d) if not value]))
max_generation = array_v['generation'].max()
f1 = p.figure()
ax = f1.add_subplot(111)
# ax.set_yscale('log')
liste = []
for i in range(max_generation+1):
tmp_gen = array([array_v[position]['generation'] for position, value in ndenumerate(array_v['field']) if value])
tmp_gen = (len([value for position, value in ndenumerate(tmp_gen) if value==i]))
tmp_percentage = tmp_gen/individuals
liste.append(tmp_percentage)
a1=p.bar(range(max_generation+1),liste,width=0.4,color='#82bcd3')
a2=p.bar(arange(max_generation+1)+0.4,[0.5**i for i in range(1,max_generation+2)],width=0.4,color='#005d37')
p.ylabel('Fraction')
p.xlabel('Genealogical age')
p.xticks(arange(max_generation)+0.4,(range(max_generation)))
p.legend((a1[0],a2[0]),('Simulation','Theoretical'))
p.ylim(0.0001,1)
f1.savefig(save_name)
f1.savefig(save_name.replace('pdf','png'))
# and export data as txt file
save_name=save_name.replace('pdf','txt')
f=open(save_name,'w')
for i,j,k in zip(range(max_generation+1),liste,[0.5**i for i in range(1,max_generation+2)]):
f.write('%s,%s,%s\n'%(i,j,k)) # csv file with time, mean, sd
f.close()
################ plot fraction of V mother retains at bud event over genealogical age ##################
def plot_frac_V_gen(array_v,save_name):
f1 = p.figure(figsize=(8, 4))
tmp_mn=[]
tmp_sd=[]
zahl = max([len(i) for i in array_v['V'].flat if i]) # longest list of X values
nr_cells=array_v.size
tmp=zeros((nr_cells,zahl),dtype=float)
for k1, vector in enumerate(array_v.flat):
if vector['ratios']:
for k2, v in enumerate(vector['ratios']):
tmp[k1,k2] = 1-v
for m in range(zahl):
tmp_list=[n for n in tmp[:,m] if n!=0]
tmp_mn.append(mean(tmp_list))
tmp_sd.append(std(tmp_list))
tmp_mn=array([n for n in tmp_mn if not isnan(n)])
tmp_sd=array(tmp_sd[:len(tmp_mn)])
tmp_mn=tmp_mn[:7] # only for first 7 generations
tmp_sd=tmp_sd[:7] # only for first 7 generations
x_ax=arange(len(tmp_mn))
plt2=p.errorbar(x_ax, tmp_mn, tmp_sd, fmt='s', elinewidth=2 ,capsize=4, ms=10, mec='#82bcd3',mew=2,color='#82bcd3',ecolor='black',barsabove=True)
p.xlim(-0.5,len(tmp_mn)-0.5)
p.xticks(x_ax)
p.ylabel('Division ratio')
p.ylim(0,1)
p.xlabel('Genealogical age')
f1.savefig(save_name)
f1.savefig(save_name.replace('pdf','png'))
# and export data as txt file
save_name=save_name.replace('pdf','txt')
f=open(save_name,'w')
for i,j,k in zip(x_ax, tmp_mn, tmp_sd):
f.write('%s,%s,%s\n'%(i,j,k)) # csv file with time, mean, sd
f.close()
################ plot V at division over genealogical age ##################
def plot_V_gen(array_v,save_name,logit):
f1 = p.figure()
ax = f1.add_subplot(111)
tmp_mn=[]
tmp_sd=[]
zahl = max([len(i) for i in array_v['V'].flat if i]) # longest list of X values
nr_cells=array_v.size
tmp=zeros((nr_cells,zahl),dtype=float)
for k1, vector in enumerate(array_v.flat):
if vector['V_div']:
for k2, v in enumerate(vector['V_div']):
tmp[k1,k2] = v
for m in range(zahl):
if not logit: tmp_list=[n for n in tmp[:,m] if n!=0]
else: tmp_list=[log10(n) for n in tmp[:,m] if n!=0]
tmp_mn.append(mean(tmp_list))
tmp_sd.append(std(tmp_list))
tmp_mn=array([n for n in tmp_mn if not isnan(n)])
tmp_sd=array(tmp_sd[:len(tmp_mn)])
x_ax=range(len(tmp_mn))
plt3=p.errorbar(x_ax, tmp_mn, tmp_sd, fmt='s', elinewidth=2 ,capsize=4, ms=10, mec='#82bcd3',mew=2,color='#82bcd3',ecolor='#82bcd3')
p.xlim(-1,len(tmp_mn))
p.xticks(range(len(tmp_mn)))
if not logit: pass
else: p.yticks(p.yticks()[0], [round(10**q,2) for q in p.yticks()[0]])
p.ylabel('Mean cell volume at division (fL)')
p.xlabel('Genealogical age')
f1.savefig(save_name)
f1.savefig(save_name.replace('pdf','png'))
# and export data as txt file
save_name=save_name.replace('pdf','txt')
f=open(save_name,'w')
for i,j,k in zip(x_ax, tmp_mn, tmp_sd):
f.write('%s,%s,%s\n'%(i,j,k)) # csv file with time, mean, sd
f.close()
################ plot times spend in G1 over genealogical age ##################
def plot_tG1_gen_new(culture,save_name,X,logit):
# max_generation=7
tmp_lists = []
try:
zahl = max([len(i) for i in culture[X].flat if i]) # longest list of X values
except:
zahl = 1
nr_cells=culture.size
tmp2=zeros((nr_cells,zahl),dtype=float)
for k1, cell in enumerate(culture.flat):
if cell[X]:
for k2, x in zip(range(zahl),cell[X]):
if X=='times_in_G1': tmp2[k1,k2] = x
else: tmp2[k1,k2] = x+40 # coz 30mins S and 10mins M
for m in range(zahl):
if not logit: tmp_list=[n for n in tmp2[:,m] if n!=0]
else: tmp_list=[log10(n) for n in tmp2[:,m] if n!=0] #log-scale
tmp_lists.append(tmp_list)
f1 = p.figure()
bp=p.boxplot(tmp_lists, positions=range(len(tmp_lists)))
#change colors of boxplot
p.setp(bp['boxes'],color='#82bcd3',lw=3)
p.setp(bp['whiskers'],color='#82bcd3',lw=3)
p.setp(bp['caps'],color='#82bcd3',lw=3)
p.setp(bp['fliers'],color='#82bcd3',lw=3)
p.setp(bp['medians'],color='#25597c',lw=3)
if logit: p.yticks(p.yticks()[0], [round(10**q,2) for q in p.yticks()[0]])
# p.xlim(-0.5,7.5)
p.ylabel(r'Time in %s (min)'%(X.replace('times_in_','')))
p.xlabel('Genealogical age')
f1.savefig(save_name)
f1.savefig(save_name.replace('pdf','png'))
# and export data as txt file
save_name=save_name.replace('pdf','txt')
f=open(save_name,'w')
for i in tmp_lists:
f.write('%s\n'%(i)) # csv file with time, mean, sd
f.close()
################ plot histogramms of given distribution ##################
def plot_hist(his_data, t_s, xlab, xlim, save_name):
f1 = p.figure()
p.hist(his_data[1:], normed=True, label=[str(i) for i in t_s[1:]])
p.xlabel(xlab)
p.legend(loc='best')
p.xlim(xlim)
p.ylabel('relative frequency')
f1.savefig(save_name)
################# plot lifecycle of a cell: V0 vs. Volume at division ##################
def plot_V0_vs_X(array_v, Y, xlab, ylab, save_name, int_1, int_2):
f1 = p.figure()
i=1
tmp_list=[]
for vector in array_v.flat:
if vector['V0'] and len(vector[Y])>=int_1 and i <= int_2:
p.plot(log(vector['V0'][0]),vector[Y][0], 'o', mec='blue', mfc='white') # plot daughters in blue
p.plot(log(vector['V0'][1:len(vector[Y])]),vector[Y][1:], 'o', mec='red', mfc='white') # and mothers in red
for zahl in range(len(vector[Y])):
tmp_list.append(((vector['V0'][zahl]),vector[Y][zahl]))
i += 1
# bin the data and plot again
tmp_list=sorted(tmp_list,key=lambda g: g[0]) # sort after first element, the V0 value
tmp_list=array(tmp_list)
#p.plot(tmp_list[:,0],tmp_list[:,1], 'og') # plot our original data
maxi, mini = tmp_list[-1,0], tmp_list[0,0]
bins=30
bin_edges=linspace(mini,maxi,bins+1)
mean_values=[]
bin_values=[]
for i in range(bins):
bin_values.append(((bin_edges[i+1]-bin_edges[i])/2.)+bin_edges[i])
tmp_means=[]
for tmp_x, tmp_y in tmp_list:
if tmp_x >= bin_edges[i] and tmp_x <= bin_edges[i+1]: tmp_means.append(tmp_y)
mean_values.append(mean(tmp_means))
#log bins for plotting
bin_values=log(bin_values)
p.plot(bin_values[:7], mean_values[:7], 'o', color='blue') # plot binned data
(a_s,b_s,r,tt,stderr)=stats.linregress(bin_values[:7],mean_values[:7])
p.plot(linspace(2.7,4,20),polyval([a_s,b_s],linspace(2.7,4,20)),'-',color='blue')
p.plot(bin_values[7:], mean_values[7:], 'o', color='red') # plot binned data
(a_s,b_s,r,tt,stderr)=stats.linregress(bin_values[7:],mean_values[7:])
p.plot(linspace(3.5,4.9,20),polyval([a_s,b_s],linspace(3.5,4.9,20)),'-',color='red')
p.xlabel(r"ln(V$_{birth}$)")
p.ylabel(ylab)
#p.title('linear')
f1.savefig(save_name)
################# plot metabolic rate vs biomass ##################
def plot_rate_vs_mass(array_vs,ks):
#array_v=array_vs
#k_growth = ks
colors = ['#800000','#c87942']#,'#008080','#82bcd3']
#col='#82bcd3'
f1 = p.figure()
for array_v, k_growth,col in zip(array_vs,ks,colors):
zahl = max([len(i) for i in array_v['R'].flat if i]) # longest list of X values
nr_cells=array_v.size
tmp2=zeros((nr_cells,zahl),dtype=float)
tmp3=zeros((nr_cells,zahl),dtype=float)
for k1, vector in enumerate(array_v.flat):
if vector['R']:
for k2, r, am, ad, v in zip(range(zahl-len(vector['R']),zahl),vector['R'],vector['B_Am'],vector['B_Ad'],vector['V']):
if vector['S_entry'] != 0:
tmp2[k1,k2] = log2(r + am + ad) # cell mass
tmp3[k1,k2] = log2(r * (am + ad) * k_growth / v) # metabolic rate
tmp2 = array([i for i in tmp2 if i[-1]!=0])
tmp3 = array([i for i in tmp3 if i[-1]!=0])
p.plot(tmp2[:,-1],tmp3[:,-1],'o',color=col)
(a_s,b_s,r,tt,stderr)=stats.linregress(tmp2[:,-1],tmp3[:,-1])
p.text(min(tmp2[:,-1])+1,min(tmp3[:,-1]),'slope = %s'%(round(a_s,2)))
p.plot(linspace(min(tmp2[:,-1]),max(tmp2[:,-1]),2),polyval([a_s,b_s],linspace(min(tmp2[:,-1]),max(tmp2[:,-1]),2)),'-',color='#25597c',lw=3)
p.xlabel('Cell mass')
p.ylabel('Metabolic rate')
f1.savefig('rate_vs_mass_0.02_and_0.03_SG2_68_G2_cells.pdf')
################# plot and calculated mass doubling time ##################
def plot_calc_mdt(array_v,save_name):
zahl = max([len(i) for i in array_v['B_R'].flat if i]) # longest list of X values
nr_cells=array_v.size
tmp2=zeros((nr_cells,zahl),dtype=float)
for k1, vector in enumerate(array_v.flat):
if vector['B_R']:
for k2, v, am, ad in zip(range(zahl-len(vector['B_R']),zahl),vector['B_R'],vector['B_Am'],vector['B_Ad']):
tmp2[k1,k2] = v+am+ad
summed=sum(tmp2, axis=0)
f1 = p.figure()
x_ax = range(len(summed))
p.plot(x_ax,log2(summed),'o',ms=10,mec='#82bcd3',color='#82bcd3')
start=len(x_ax)/4
(a_s,b_s,r,tt,stderr)=stats.linregress(x_ax[start:],log2(summed)[start:])
p.plot(linspace(0,len(summed),100),polyval([a_s,b_s],linspace(0,len(summed),100)),'-',color='#25597c',lw=3)
text_loc = max(p.ylim())
p.text(start,text_loc-1,r"%s Minutes generation time"%(round(1/a_s,2)))
p.text(start,text_loc-2,r"%s Hours"%(round(1/(a_s*60),2)))
p.text(start,text_loc-3,r"%s Rate"%(round((a_s*60),2)))
p.ylabel('log2(A+R)')
p.xlabel('Time (min)')
f1.savefig(save_name)
f1.savefig(save_name.replace('pdf','png'))
# and export data as txt file
save_name=save_name.replace('pdf','txt')
f=open(save_name,'w')
for i,j in zip(x_ax,log2(summed)):
f.write('%s,%s\n'%(i,j)) # csv file with time, mean, sd
f.close()
################# plot lifecycle of a cell: [X] ##################
def plot_X_life(array_v, X, xlab, ylab, save_name, logit):
f1 = p.figure()
zahl = max([len(i) for i in array_v[X].flat if i]) # longest list of X values
nr_cells=array_v.size
tmp=zeros((nr_cells,zahl),dtype=[('value',float),('field',bool)])
tmp_mn=[]
tmp_sd=[]
for k1, vector in enumerate(array_v.flat):
if vector[X]:
for k2, v in zip(range(zahl-len(vector[X]),zahl),vector[X]):
tmp[k1,k2] = (v,True)
if not logit: p.plot(range(zahl-len(vector[X]),zahl),vector[X], alpha=0.1)
else: p.plot(range(zahl-len(vector[X]),zahl),log10(vector[X]), alpha=0.1) #log-scale
for m in range(zahl):
if not logit: tmp_list=[n for n,b in tmp[:,m] if b==True]
else: tmp_list=[log10(n) for n,b in tmp[:,m] if b==True] #log-scale
tmp_mn.append(mean(tmp_list))
tmp_sd.append(std(tmp_list))
tmp_mn=array(tmp_mn)
tmp_sd=array(tmp_sd)
p1,=p.plot(range(zahl-len(tmp_mn),zahl), tmp_mn, color='#25597c',lw=3)
p.fill_between(range(zahl-len(tmp_mn),zahl), tmp_mn-tmp_sd, tmp_mn+tmp_sd, color='#82bcd3', alpha=1 )
p.plot(range(zahl-len(tmp_mn),zahl), tmp_mn-tmp_sd, color='black',lw=0.3) # plot black line along standart deviation lower end
p.plot(range(zahl-len(tmp_mn),zahl), tmp_mn+tmp_sd, color='black',lw=0.3) # plot black line along standart deviation upper end
r = p.Rectangle((0, 0), 1, 1,facecolor='#82bcd3', alpha=1,edgecolor='black',lw=0.3) # creates rectangle patch for legend use.
p.legend([p1,r],('Mean','Standard deviation'),loc='best',shadow=True)
if not logit:
pass
else: p.yticks(p.yticks()[0], [round(10**q,2) for q in p.yticks()[0]])
# p.xlim(850,1250) # to zoom in
p.xlabel(xlab)
p.ylabel(ylab)
f1.savefig(save_name)
f1.savefig(save_name.replace('pdf','png'))
# and put in txt file
save_name=save_name.replace('pdf','txt')
f=open(save_name,'w')
for i,j,k in zip(range(zahl-len(tmp_mn),zahl),tmp_mn,tmp_sd):
f.write('%s,%s,%s\n'%(i,j,k)) # csv file with time, mean, sd
f.close()
################# plot [X] vs [Y] ##################
def plot_X_Y(tmp_array_v, X, Y, xlab, ylab, save_name, int_1, int_2):
f1 = p.figure()
ax = Axes3D(f1)
#ax.view_init(16, -75)
for array_v,z in zip(tmp_array_v,[30, 20, 10, 0]):
i=1
for vector in array_v.flat:
if vector[X] and len(vector[X])>=int_1 and i <= int_2:
p.plot(vector[X], vector[Y], z, 'o', alpha=0.1)
i += 1
ax.set_xlabel(xlab)
ax.set_ylabel(ylab)
#p.show()
f1.savefig(save_name)
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/tests/test_backend_pgf.py | 9 | 6311 | # -*- encoding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import shutil
import numpy as np
import nose
from nose.plugins.skip import SkipTest
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.compat import subprocess
from matplotlib.testing.compare import compare_images, ImageComparisonFailure
from matplotlib.testing.decorators import _image_directories
baseline_dir, result_dir = _image_directories(lambda: 'dummy func')
def check_for(texsystem):
header = """
\\documentclass{minimal}
\\usepackage{pgf}
\\begin{document}
\\typeout{pgfversion=\\pgfversion}
\\makeatletter
\\@@end
"""
try:
latex = subprocess.Popen(["xelatex", "-halt-on-error"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = latex.communicate(header.encode("utf8"))
except OSError:
return False
return latex.returncode == 0
def switch_backend(backend):
def switch_backend_decorator(func):
def backend_switcher(*args, **kwargs):
try:
prev_backend = mpl.get_backend()
mpl.rcdefaults()
plt.switch_backend(backend)
result = func(*args, **kwargs)
finally:
plt.switch_backend(prev_backend)
return result
return nose.tools.make_decorator(func)(backend_switcher)
return switch_backend_decorator
def compare_figure(fname, savefig_kwargs={}):
actual = os.path.join(result_dir, fname)
plt.savefig(actual, **savefig_kwargs)
expected = os.path.join(result_dir, "expected_%s" % fname)
shutil.copyfile(os.path.join(baseline_dir, fname), expected)
err = compare_images(expected, actual, tol=14)
if err:
raise ImageComparisonFailure('images not close: %s vs. '
'%s' % (actual, expected))
def create_figure():
plt.figure()
x = np.linspace(0, 1, 15)
plt.plot(x, x ** 2, "b-")
plt.fill_between([0., .4], [.4, 0.], hatch='//', facecolor="lightgray",
edgecolor="red")
plt.plot(x, 1 - x**2, "g>")
plt.plot([0.9], [0.5], "ro", markersize=3)
plt.text(0.9, 0.5, 'unicode (ü, °, µ) and math ($\\mu_i = x_i^2$)',
ha='right', fontsize=20)
plt.ylabel('sans-serif, blue, $\\frac{\\sqrt{x}}{y^2}$..',
family='sans-serif', color='blue')
# test compiling a figure to pdf with xelatex
@switch_backend('pgf')
def test_xelatex():
if not check_for('xelatex'):
raise SkipTest('xelatex + pgf is required')
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
create_figure()
compare_figure('pgf_xelatex.pdf')
# test compiling a figure to pdf with pdflatex
@switch_backend('pgf')
def test_pdflatex():
if not check_for('pdflatex'):
raise SkipTest('pdflatex + pgf is required')
rc_pdflatex = {'font.family': 'serif',
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
'pgf.preamble': ['\\usepackage[utf8x]{inputenc}',
'\\usepackage[T1]{fontenc}']}
mpl.rcParams.update(rc_pdflatex)
create_figure()
compare_figure('pgf_pdflatex.pdf')
# test updating the rc parameters for each figure
@switch_backend('pgf')
def test_rcupdate():
if not check_for('xelatex') or not check_for('pdflatex'):
raise SkipTest('xelatex and pdflatex + pgf required')
rc_sets = []
rc_sets.append({'font.family': 'sans-serif',
'font.size': 30,
'figure.subplot.left': .2,
'lines.markersize': 10,
'pgf.rcfonts': False,
'pgf.texsystem': 'xelatex'})
rc_sets.append({'font.family': 'monospace',
'font.size': 10,
'figure.subplot.left': .1,
'lines.markersize': 20,
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
'pgf.preamble': ['\\usepackage[utf8x]{inputenc}',
'\\usepackage[T1]{fontenc}',
'\\usepackage{sfmath}']})
for i, rc_set in enumerate(rc_sets):
mpl.rcParams.update(rc_set)
create_figure()
compare_figure('pgf_rcupdate%d.pdf' % (i + 1))
# test backend-side clipping, since large numbers are not supported by TeX
@switch_backend('pgf')
def test_pathclip():
if not check_for('xelatex'):
raise SkipTest('xelatex + pgf is required')
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
plt.figure()
plt.plot([0., 1e100], [0., 1e100])
plt.xlim(0, 1)
plt.ylim(0, 1)
# this test passes if compiling/saving to pdf works (no image comparison)
plt.savefig(os.path.join(result_dir, "pgf_pathclip.pdf"))
# test mixed mode rendering
@switch_backend('pgf')
def test_mixedmode():
if not check_for('xelatex'):
raise SkipTest('xelatex + pgf is required')
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
Y, X = np.ogrid[-1:1:40j, -1:1:40j]
plt.figure()
plt.pcolor(X**2 + Y**2).set_rasterized(True)
compare_figure('pgf_mixedmode.pdf')
# test bbox_inches clipping
@switch_backend('pgf')
def test_bbox_inches():
if not check_for('xelatex'):
raise SkipTest('xelatex + pgf is required')
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
Y, X = np.ogrid[-1:1:40j, -1:1:40j]
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax1.plot(range(5))
ax2 = fig.add_subplot(122)
ax2.plot(range(5))
plt.tight_layout()
bbox = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
compare_figure('pgf_bbox_inches.pdf', savefig_kwargs={'bbox_inches': bbox})
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
breznak/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mathtext.py | 69 | 101723 | r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing them to a matplotlib backend.
For a tutorial of its usage see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email mdroe@stsci.edu, but please check KNOWN ISSUES below first.
"""
from __future__ import division
import os
from cStringIO import StringIO
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
from matplotlib.pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement
# Enable packrat parsing
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
"""get_unicode_index(symbol) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
can be a single unicode character, a TeX command (i.e. r'\pi'), or a
Type1 symbol name (i.e. 'phi').
"""
# From UTF #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
try:# This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
try:# Is symbol a TeX symbol (i.e. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError, message
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
- :meth:`render_glyph`
- :meth:`render_filled_rect`
- :meth:`get_results`
And optionally, if you need to use a Freetype hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.fonts_object = None
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
def render_filled_rect(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the Freetype hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
class MathtextBackendBbox(MathtextBackend):
"""
A backend whose only purpose is to get a precise bounding box.
Only required for the Agg backend.
"""
def __init__(self, real_backend):
MathtextBackend.__init__(self)
self.bbox = [0, 0, 0, 0]
self.real_backend = real_backend
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def render_glyph(self, ox, oy, info):
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
def render_rect_filled(self, x1, y1, x2, y2):
self._update_bbox(x1, y1, x2, y2)
def get_results(self, box):
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self._switch_to_real_backend()
self.fonts_object.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
return self.fonts_object.get_results(box)
def get_hinting_type(self):
return self.real_backend.get_hinting_type()
def _switch_to_real_backend(self):
self.fonts_object.mathtext_backend = self.real_backend
self.real_backend.fonts_object = self.fonts_object
self.real_backend.ox = self.bbox[0]
self.real_backend.oy = self.bbox[1]
class MathtextBackendAggRender(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
MathtextBackend.__init__(self)
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
self.image = FT2Image(ceil(w), ceil(h + d))
def render_glyph(self, ox, oy, info):
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.ymax, info.glyph)
def render_rect_filled(self, x1, y1, x2, y2):
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box):
return (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
self.fonts_object.get_used_characters())
def get_hinting_type(self):
return LOAD_FORCE_AUTOHINT
def MathtextBackendAgg():
return MathtextBackendBbox(MathtextBackendAggRender())
class MathtextBackendBitmapRender(MathtextBackendAggRender):
def get_results(self, box):
return self.image, self.depth
def MathtextBackendBitmap():
"""
A backend to generate standalone mathtext images. No additional
matplotlib backend is required.
"""
return MathtextBackendBbox(MathtextBackendBitmapRender())
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = StringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box):
ship(0, -self.depth, box)
#print self.depth
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
self.fonts_object.get_used_characters())
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
self.fonts_object.get_used_characters())
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = unichr(info.num)
self.svg_glyphs.append(
(info.font, info.fontsize, thetext, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
self.fonts_object.get_used_characters())
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathTextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
# Make these classes doubly-linked
self.mathtext_backend.fonts_object = self
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance for font between *sym1* and *sym2*.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
- *iceberg* - the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
    def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
        """
        Draw a glyph at
        - *ox*, *oy*: position
        - *facename*: One of the TeX face names
        - *font_class*:
        - *sym*: TeX symbol name or single character
        - *fontsize*: fontsize in points
        - *dpi*: The dpi to draw at.
        """
        info = self._get_info(facename, font_class, sym, fontsize, dpi)
        # Record the character against its font file (keyed by the file's
        # stat info) so backends that subset fonts know to embed this glyph.
        realpath, stat_key = get_realpath_and_stat(info.font.fname)
        used_characters = self.used_characters.setdefault(
            stat_key, (realpath, set()))
        used_characters[1].add(info.num)
        # The actual drawing is delegated to the backend.
        self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
return self.mathtext_backend.get_results(box)
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
class TruetypeFonts(Fonts):
    """
    A generic base class for all font setups that use Truetype fonts
    (through FT2Font).
    """
    class CachedFont:
        # Pairs an FT2Font with its character maps so they are computed
        # only once per font file.
        def __init__(self, font):
            self.font = font
            self.charmap = font.get_charmap()
            # Inverse of charmap: glyph index -> character code.
            self.glyphmap = dict(
                [(glyphind, ccode) for ccode, glyphind in self.charmap.iteritems()])
        def __repr__(self):
            return repr(self.font)
    def __init__(self, default_font_prop, mathtext_backend):
        Fonts.__init__(self, default_font_prop, mathtext_backend)
        # Cache of _get_info results, keyed by
        # (fontname, font_class, sym, fontsize, dpi).
        self.glyphd = {}
        self._fonts = {}
        filename = findfont(default_font_prop)
        default_font = self.CachedFont(FT2Font(str(filename)))
        self._fonts['default'] = default_font
    def destroy(self):
        # Drop the glyph cache (it holds font objects) before chaining
        # to the base-class cycle breaking.
        self.glyphd = None
        Fonts.destroy(self)
    def _get_font(self, font):
        # Resolve a TeX font name (via self.fontmap) or a basename to a
        # CachedFont, loading and caching the file on first use.
        if font in self.fontmap:
            basename = self.fontmap[font]
        else:
            basename = font
        cached_font = self._fonts.get(basename)
        if cached_font is None:
            font = FT2Font(basename)
            cached_font = self.CachedFont(font)
            self._fonts[basename] = cached_font
            # Also index by postscript name (both cases) so later
            # lookups by any alias hit the same cache entry.
            self._fonts[font.postscript_name] = cached_font
            self._fonts[font.postscript_name.lower()] = cached_font
        return cached_font
    def _get_offset(self, cached_font, glyph, fontsize, dpi):
        # The Cmex10 delimiters need a vertical shift; FT2Font metrics
        # are in 26.6 fixed point, hence the /64.0 factors.
        if cached_font.font.postscript_name == 'Cmex10':
            return glyph.height/64.0/2.0 + 256.0/64.0 * dpi/72.0
        return 0.
    def _get_info(self, fontname, font_class, sym, fontsize, dpi):
        """Load (with caching) the font, metrics and glyph for *sym*."""
        key = fontname, font_class, sym, fontsize, dpi
        bunch = self.glyphd.get(key)
        if bunch is not None:
            return bunch
        cached_font, num, symbol_name, fontsize, slanted = \
            self._get_glyph(fontname, font_class, sym, fontsize)
        font = cached_font.font
        font.set_size(fontsize, dpi)
        glyph = font.load_char(
            num,
            flags=self.mathtext_backend.get_hinting_type())
        # bbox comes back in 26.6 fixed point (1/64ths of a point).
        xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
        offset = self._get_offset(cached_font, glyph, fontsize, dpi)
        metrics = Bunch(
            advance = glyph.linearHoriAdvance/65536.0,
            height = glyph.height/64.0,
            width = glyph.width/64.0,
            xmin = xmin,
            xmax = xmax,
            ymin = ymin+offset,
            ymax = ymax+offset,
            # iceberg is the equivalent of TeX's "height"
            iceberg = glyph.horiBearingY/64.0 + offset,
            slanted = slanted
            )
        result = self.glyphd[key] = Bunch(
            font = font,
            fontsize = fontsize,
            postscript_name = font.postscript_name,
            metrics = metrics,
            symbol_name = symbol_name,
            num = num,
            glyph = glyph,
            offset = offset
            )
        return result
    def get_xheight(self, font, fontsize, dpi):
        cached_font = self._get_font(font)
        cached_font.font.set_size(fontsize, dpi)
        pclt = cached_font.font.get_sfnt_table('pclt')
        if pclt is None:
            # Some fonts don't store the xHeight, so we do a poor man's xHeight
            metrics = self.get_metrics(font, 'it', 'x', fontsize, dpi)
            return metrics.iceberg
        xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
        return xHeight
    def get_underline_thickness(self, font, fontsize, dpi):
        # This function used to grab underline thickness from the font
        # metrics, but that information is just too un-reliable, so it
        # is now hardcoded.
        return ((0.75 / 12.0) * fontsize * dpi) / 72.0
    def get_kern(self, font1, fontclass1, sym1, fontsize1,
                 font2, fontclass2, sym2, fontsize2, dpi):
        # Kerning is only meaningful within one font at one size;
        # otherwise defer to the base-class default of zero.
        if font1 == font2 and fontsize1 == fontsize2:
            info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
            info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
            font = info1.font
            return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
        return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
                              font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
    """
    Use the Bakoma TrueType fonts for rendering.
    Symbols are strewn about a number of font files, each of which has
    its own proprietary 8-bit encoding.
    """
    _fontmap = { 'cal' : 'cmsy10',
                 'rm'  : 'cmr10',
                 'tt'  : 'cmtt10',
                 'it'  : 'cmmi10',
                 'bf'  : 'cmb10',
                 'sf'  : 'cmss10',
                 'ex'  : 'cmex10'
                 }
    fontmap = {}
    def __init__(self, *args, **kwargs):
        # Fall back to the STIX fonts for any symbol Bakoma lacks.
        self._stix_fallback = StixFonts(*args, **kwargs)
        TruetypeFonts.__init__(self, *args, **kwargs)
        # Resolve the TeX font names to real font paths once, shared at
        # class level by all instances.
        if not len(self.fontmap):
            for key, val in self._fontmap.iteritems():
                fullpath = findfont(val)
                self.fontmap[key] = fullpath
                self.fontmap[val] = fullpath
    _slanted_symbols = set(r"\int \oint".split())
    def _get_glyph(self, fontname, font_class, sym, fontsize):
        """
        Resolve *sym* to (cached_font, glyph number, glyph name,
        fontsize, slanted), delegating to the STIX fallback when the
        Bakoma fonts have no mapping for it.
        """
        symbol_name = None
        if fontname in self.fontmap and sym in latex_to_bakoma:
            basename, num = latex_to_bakoma[sym]
            # cmmi10 is the math-italic font, so everything in it is slanted.
            slanted = (basename == "cmmi10") or sym in self._slanted_symbols
            try:
                cached_font = self._get_font(basename)
            except RuntimeError:
                pass
            else:
                symbol_name = cached_font.font.get_glyph_name(num)
                num = cached_font.glyphmap[num]
        elif len(sym) == 1:
            slanted = (fontname == "it")
            try:
                cached_font = self._get_font(fontname)
            except RuntimeError:
                pass
            else:
                num = ord(sym)
                gid = cached_font.charmap.get(num)
                if gid is not None:
                    symbol_name = cached_font.font.get_glyph_name(
                        cached_font.charmap[num])
        if symbol_name is None:
            return self._stix_fallback._get_glyph(
                fontname, font_class, sym, fontsize)
        return cached_font, num, symbol_name, fontsize, slanted
    # The Bakoma fonts contain many pre-sized alternatives for the
    # delimiters. The AutoSizedChar class will use these alternatives
    # and select the best (closest sized) glyph.
    _size_alternatives = {
        '(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
               ('ex', '\xb5'), ('ex', '\xc3')],
        ')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
               ('ex', '\xb6'), ('ex', '\x21')],
        '{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
               ('ex', '\xbd'), ('ex', '\x28')],
        '}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
               ('ex', '\xbe'), ('ex', '\x29')],
        # The fourth size of '[' is mysteriously missing from the BaKoMa
        # font, so I've ommitted it for both '[' and ']'
        '[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
               ('ex', '\x22')],
        ']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
               ('ex', '\x23')],
        r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
                      ('ex', '\xb9'), ('ex', '\x24')],
        r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
                      ('ex', '\xba'), ('ex', '\x25')],
        r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
                     ('ex', '\xbb'), ('ex', '\x26')],
        r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
                     ('ex', '\xbc'), ('ex', '\x27')],
        r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
                      ('ex', '\xbf'), ('ex', '\x2a')],
        r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
                      ('ex', '\xc0'), ('ex', '\x2b')],
        r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
                        ('ex', '\x72'), ('ex', '\x73')],
        r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
                        ('ex', '\xc2'), ('ex', '\x2d')],
        r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
                ('ex', '\xcb'), ('ex', '\x2c')],
        r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
                       ('ex', '\x64')],
        r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
                        ('ex', '\x67')],
        r'<' : [('cal', 'h'), ('ex', 'D')],
        r'>' : [('cal', 'i'), ('ex', 'E')]
        }
    # Bug fix: these aliases were previously written as plain (non-raw)
    # strings, so '\rightparent' (also a typo for \rightparen),
    # '\rightbrace' and '\rightbracket' contained a literal carriage
    # return from the '\r' escape and could never match the actual TeX
    # commands.  Raw strings and the corrected spelling make all six
    # aliases reachable; previously-working keys are unchanged.
    for alias, target in [(r'\leftparen', '('),
                          (r'\rightparen', ')'),
                          (r'\leftbrace', '{'),
                          (r'\rightbrace', '}'),
                          (r'\leftbracket', '['),
                          (r'\rightbracket', ']')]:
        _size_alternatives[alias] = _size_alternatives[target]
    def get_sized_alternatives_for_symbol(self, fontname, sym):
        # Return the pre-sized variants if we have them, else just the
        # symbol itself.
        return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
    """
    An abstract base class for handling Unicode fonts.
    While some reasonably complete Unicode fonts (such as DejaVu) may
    work in some situations, the only Unicode font I'm aware of with a
    complete set of math symbols is STIX.
    This class will "fallback" on the Bakoma fonts when a required
    symbol can not be found in the font.
    """
    fontmap = {}
    use_cmex = True
    def __init__(self, *args, **kwargs):
        # This must come first so the backend's owner is set correctly
        if rcParams['mathtext.fallback_to_cm']:
            self.cm_fallback = BakomaFonts(*args, **kwargs)
        else:
            self.cm_fallback = None
        TruetypeFonts.__init__(self, *args, **kwargs)
        # Resolve each TeX font class to a real font path from rcParams;
        # the fontmap is class-level, so this only runs once.
        if not len(self.fontmap):
            for texfont in "cal rm tt it bf sf".split():
                prop = rcParams['mathtext.' + texfont]
                font = findfont(prop)
                self.fontmap[texfont] = font
            prop = FontProperties('cmex10')
            font = findfont(prop)
            self.fontmap['ex'] = font
    _slanted_symbols = set(r"\int \oint".split())
    def _map_virtual_font(self, fontname, font_class, uniindex):
        # Hook for subclasses to remap (fontname, uniindex); identity here.
        return fontname, uniindex
    def _get_glyph(self, fontname, font_class, sym, fontsize):
        """Resolve *sym* to (cached_font, uniindex, glyph name, fontsize, slanted)."""
        found_symbol = False
        # Prefer the cmex10 mapping for symbols it covers.
        if self.use_cmex:
            uniindex = latex_to_cmex.get(sym)
            if uniindex is not None:
                fontname = 'ex'
                found_symbol = True
        if not found_symbol:
            try:
                uniindex = get_unicode_index(sym)
                found_symbol = True
            except ValueError:
                uniindex = ord('?')
                warn("No TeX to unicode mapping for '%s'" %
                     sym.encode('ascii', 'backslashreplace'),
                     MathTextWarning)
        fontname, uniindex = self._map_virtual_font(
            fontname, font_class, uniindex)
        # Only characters in the "Letter" class should be italicized in 'it'
        # mode. Greek capital letters should be Roman.
        if found_symbol:
            new_fontname = fontname
            if fontname == 'it':
                if uniindex < 0x10000:
                    unistring = unichr(uniindex)
                    if (not unicodedata.category(unistring)[0] == "L"
                        or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
                        new_fontname = 'rm'
            slanted = (new_fontname == 'it') or sym in self._slanted_symbols
            found_symbol = False
            try:
                cached_font = self._get_font(new_fontname)
            except RuntimeError:
                pass
            else:
                try:
                    glyphindex = cached_font.charmap[uniindex]
                    found_symbol = True
                except KeyError:
                    pass
        if not found_symbol:
            if self.cm_fallback:
                # Try Computer Modern (Bakoma) before giving up entirely.
                warn("Substituting with a symbol from Computer Modern.",
                     MathTextWarning)
                return self.cm_fallback._get_glyph(
                    fontname, 'it', sym, fontsize)
            else:
                if fontname == 'it' and isinstance(self, StixFonts):
                    return self._get_glyph('rm', font_class, sym, fontsize)
                warn("Font '%s' does not have a glyph for '%s'" %
                     (fontname, sym.encode('ascii', 'backslashreplace')),
                     MathTextWarning)
                warn("Substituting with a dummy symbol.", MathTextWarning)
                fontname = 'rm'
                new_fontname = fontname
                cached_font = self._get_font(fontname)
                uniindex = 0xA4 # currency character, for lack of anything better
                glyphindex = cached_font.charmap[uniindex]
                slanted = False
        symbol_name = cached_font.font.get_glyph_name(glyphindex)
        return cached_font, uniindex, symbol_name, fontsize, slanted
    def get_sized_alternatives_for_symbol(self, fontname, sym):
        # The Unicode fonts have no pre-sized variants of their own;
        # delegate to Computer Modern when a fallback is configured.
        if self.cm_fallback:
            return self.cm_fallback.get_sized_alternatives_for_symbol(
                fontname, sym)
        return [(fontname, sym)]
class StixFonts(UnicodeFonts):
    """
    A font handling class for the STIX fonts.
    In addition to what UnicodeFonts provides, this class:
    - supports "virtual fonts" which are complete alpha numeric
      character sets with different font styles at special Unicode
      code points, such as "Blackboard".
    - handles sized alternative characters for the STIXSizeX fonts.
    """
    _fontmap = { 'rm' : 'STIXGeneral',
                 'it' : 'STIXGeneral:italic',
                 'bf' : 'STIXGeneral:weight=bold',
                 'nonunirm' : 'STIXNonUnicode',
                 'nonuniit' : 'STIXNonUnicode:italic',
                 'nonunibf' : 'STIXNonUnicode:weight=bold',
                 0 : 'STIXGeneral',
                 1 : 'STIXSize1',
                 2 : 'STIXSize2',
                 3 : 'STIXSize3',
                 4 : 'STIXSize4',
                 5 : 'STIXSize5'
                 }
    fontmap = {}
    use_cmex = False
    cm_fallback = False
    _sans = False
    def __init__(self, *args, **kwargs):
        TruetypeFonts.__init__(self, *args, **kwargs)
        # Resolve font names to paths once; fontmap is shared at class level.
        if not len(self.fontmap):
            for key, name in self._fontmap.iteritems():
                fullpath = findfont(name)
                self.fontmap[key] = fullpath
                self.fontmap[name] = fullpath
    def _map_virtual_font(self, fontname, font_class, uniindex):
        """
        Remap (fontname, uniindex) for character sets that are actually
        embedded at special code points inside other fonts.
        """
        # Handle these "fonts" that are actually embedded in
        # other fonts.
        mapping = stix_virtual_fonts.get(fontname)
        if self._sans and mapping is None:
            mapping = stix_virtual_fonts['sf']
            doing_sans_conversion = True
        else:
            doing_sans_conversion = False
        if mapping is not None:
            if isinstance(mapping, dict):
                mapping = mapping[font_class]
            # Binary search for the source glyph range.  (Idiom fix: the
            # loop variable used to be named `range`, shadowing the
            # builtin; renamed to `glyph_range`, same control flow.)
            lo = 0
            hi = len(mapping)
            while lo < hi:
                mid = (lo+hi)//2
                glyph_range = mapping[mid]
                if uniindex < glyph_range[0]:
                    hi = mid
                elif uniindex <= glyph_range[1]:
                    break
                else:
                    lo = mid + 1
            # glyph_range holds the last probed entry whether or not the
            # loop broke; re-test membership before remapping.
            if uniindex >= glyph_range[0] and uniindex <= glyph_range[1]:
                uniindex = uniindex - glyph_range[0] + glyph_range[3]
                fontname = glyph_range[2]
            elif not doing_sans_conversion:
                # This will generate a dummy character
                uniindex = 0x1
                fontname = 'it'
        # Handle private use area glyphs
        if (fontname in ('it', 'rm', 'bf') and
            uniindex >= 0xe000 and uniindex <= 0xf8ff):
            fontname = 'nonuni' + fontname
        return fontname, uniindex
    _size_alternatives = {}
    def get_sized_alternatives_for_symbol(self, fontname, sym):
        """Collect (and cache) the sized variants of *sym* from the STIXSizeX fonts."""
        alternatives = self._size_alternatives.get(sym)
        if alternatives:
            return alternatives
        alternatives = []
        try:
            uniindex = get_unicode_index(sym)
        except ValueError:
            return [(fontname, sym)]
        # The largest size of the < and > glyphs in STIX are mapped
        # to the angle-bracket code points.
        fix_ups = {
            ord('<'): 0x27e8,
            ord('>'): 0x27e9 }
        uniindex = fix_ups.get(uniindex, uniindex)
        for i in range(6):
            cached_font = self._get_font(i)
            glyphindex = cached_font.charmap.get(uniindex)
            if glyphindex is not None:
                alternatives.append((i, unichr(uniindex)))
        self._size_alternatives[sym] = alternatives
        return alternatives
class StixSansFonts(StixFonts):
    """
    STIX font handling that defaults to the sans-serif virtual font
    (the ``_sans`` flag flips :meth:`StixFonts._map_virtual_font`).
    """
    _sans = True
class StandardPsFonts(Fonts):
    """
    Use the standard postscript fonts for rendering to backend_ps
    Unlike the other font classes, BakomaFont and UnicodeFont, this
    one requires the Ps backend.
    """
    basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
    # TeX font class -> AFM basename of a standard PostScript font.
    fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
                'rm' : 'pncr8a', # New Century Schoolbook
                'tt' : 'pcrr8a', # Courier
                'it' : 'pncri8a', # New Century Schoolbook Italic
                'sf' : 'phvr8a', # Helvetica
                'bf' : 'pncb8a', # New Century Schoolbook Bold
                None : 'psyr' # Symbol
                }
    def __init__(self, default_font_prop):
        Fonts.__init__(self, default_font_prop, MathtextBackendPs())
        self.glyphd = {}
        self.fonts = {}
        filename = findfont(default_font_prop, fontext='afm')
        default_font = AFM(file(filename, 'r'))
        default_font.fname = filename
        self.fonts['default'] = default_font
        self.pswriter = StringIO()
    def _get_font(self, font):
        # Resolve a TeX font name or AFM basename to a cached AFM object,
        # loading the .afm file from basepath on first use.
        if font in self.fontmap:
            basename = self.fontmap[font]
        else:
            basename = font
        cached_font = self.fonts.get(basename)
        if cached_font is None:
            fname = os.path.join(self.basepath, basename + ".afm")
            cached_font = AFM(file(fname, 'r'))
            cached_font.fname = fname
            self.fonts[basename] = cached_font
            # Also index by the font's own name for alias lookups.
            self.fonts[cached_font.get_fontname()] = cached_font
        return cached_font
    def _get_info (self, fontname, font_class, sym, fontsize, dpi):
        'load the cmfont, metrics and glyph with caching'
        key = fontname, sym, fontsize, dpi
        tup = self.glyphd.get(key)
        if tup is not None:
            return tup
        # Only characters in the "Letter" class should really be italicized.
        # This class includes greek letters, so we're ok
        if (fontname == 'it' and
            (len(sym) > 1 or
             not unicodedata.category(unicode(sym)).startswith("L"))):
            fontname = 'rm'
        found_symbol = False
        if sym in latex_to_standard:
            fontname, num = latex_to_standard[sym]
            glyph = chr(num)
            found_symbol = True
        elif len(sym) == 1:
            glyph = sym
            num = ord(glyph)
            found_symbol = True
        else:
            warn("No TeX to built-in Postscript mapping for '%s'" % sym,
                 MathTextWarning)
        slanted = (fontname == 'it')
        font = self._get_font(fontname)
        if found_symbol:
            try:
                symbol_name = font.get_name_char(glyph)
            except KeyError:
                warn("No glyph in standard Postscript font '%s' for '%s'" %
                     (font.postscript_name, sym),
                     MathTextWarning)
                found_symbol = False
        if not found_symbol:
            # Substitute a visible placeholder rather than failing.
            glyph = sym = '?'
            num = ord(glyph)
            symbol_name = font.get_name_char(glyph)
        offset = 0
        # AFM metrics are in 1/1000 em units; scale to points.
        scale = 0.001 * fontsize
        xmin, ymin, xmax, ymax = [val * scale
                                  for val in font.get_bbox_char(glyph)]
        metrics = Bunch(
            advance = font.get_width_char(glyph) * scale,
            width = font.get_width_char(glyph) * scale,
            height = font.get_height_char(glyph) * scale,
            xmin = xmin,
            xmax = xmax,
            ymin = ymin+offset,
            ymax = ymax+offset,
            # iceberg is the equivalent of TeX's "height"
            iceberg = ymax + offset,
            slanted = slanted
            )
        self.glyphd[key] = Bunch(
            font = font,
            fontsize = fontsize,
            postscript_name = font.get_fontname(),
            metrics = metrics,
            symbol_name = symbol_name,
            num = num,
            glyph = glyph,
            offset = offset
            )
        return self.glyphd[key]
    def get_kern(self, font1, fontclass1, sym1, fontsize1,
                 font2, fontclass2, sym2, fontsize2, dpi):
        # Kerning only applies within one font at one size; otherwise
        # fall back to the base-class default of zero.
        if font1 == font2 and fontsize1 == fontsize2:
            info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
            info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
            font = info1.font
            return (font.get_kern_dist(info1.glyph, info2.glyph)
                    * 0.001 * fontsize1)
        return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
                              font2, fontclass2, sym2, fontsize2, dpi)
    def get_xheight(self, font, fontsize, dpi):
        cached_font = self._get_font(font)
        return cached_font.get_xheight() * 0.001 * fontsize
    def get_underline_thickness(self, font, fontsize, dpi):
        cached_font = self._get_font(font)
        return cached_font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g. node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 4
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts drop below the baseline
# NOTE(review): "drop below" looks copy-pasted from SUBDROP above --
# superscripts are raised; confirm the intended wording.
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
class MathTextWarning(Warning):
    """Warning category used for mathtext font and layout problems."""
    pass
class Node(object):
    """
    Base class for nodes in the TeX box model.
    Tracks a nesting *size* level manipulated by :meth:`shrink` and
    :meth:`grow`.
    """
    def __init__(self):
        # 0 means full size; each shrink() bumps this by one level.
        self.size = 0
    def __repr__(self):
        return self.__internal_repr__()
    def __internal_repr__(self):
        # Subclasses override this to customize their repr.
        return self.__class__.__name__
    def get_kerning(self, next):
        """Kerning between this node and *next*; none by default."""
        return 0.0
    def shrink(self):
        """
        Step one size level smaller.  Only the first few levels
        actually scale dimensions; beyond that nothing gets smaller.
        """
        self.size = self.size + 1
    def grow(self):
        """
        Step one size level larger.  There is no limit to how big
        something can get.
        """
        self.size = self.size - 1
    def render(self, x, y):
        """Draw the node at (*x*, *y*); base nodes draw nothing."""
        pass
class Box(Node):
    """A node occupying physical space: *width*, *height* and *depth*."""
    def __init__(self, width, height, depth):
        Node.__init__(self)
        self.width = width
        self.height = height
        self.depth = depth
    def shrink(self):
        Node.shrink(self)
        # Dimensions scale down only for the first NUM_SIZE_LEVELS levels.
        if self.size < NUM_SIZE_LEVELS:
            self.width = self.width * SHRINK_FACTOR
            self.height = self.height * SHRINK_FACTOR
            self.depth = self.depth * SHRINK_FACTOR
    def grow(self):
        Node.grow(self)
        # Growth is unbounded, mirroring Node.grow.
        self.width = self.width * GROW_FACTOR
        self.height = self.height * GROW_FACTOR
        self.depth = self.depth * GROW_FACTOR
    def render(self, x1, y1, x2, y2):
        """A bare box has no content to draw."""
        pass
class Vbox(Box):
    """A zero-width box carrying only *height* and *depth*."""
    def __init__(self, height, depth):
        Box.__init__(self, 0., height, depth)
class Hbox(Box):
    """A zero-height, zero-depth box carrying only *width*."""
    def __init__(self, width):
        Box.__init__(self, width, 0., 0.)
class Char(Node):
    """
    Represents a single character. Unlike TeX, the font information
    and metrics are stored with each :class:`Char` to make it easier
    to lookup the font metrics when needed. Note that TeX boxes have
    a width, height, and depth, unlike Type1 and Truetype which use a
    full bounding box and an advance in the x-direction. The metrics
    must be converted to the TeX way, and the advance (if different
    from width) must be converted into a :class:`Kern` node when the
    :class:`Char` is added to its parent :class:`Hlist`.
    """
    def __init__(self, c, state):
        Node.__init__(self)
        self.c = c
        self.font_output = state.font_output
        assert isinstance(state.font, (str, unicode, int))
        self.font = state.font
        self.font_class = state.font_class
        self.fontsize = state.fontsize
        self.dpi = state.dpi
        # The real width, height and depth will be set during the
        # pack phase, after we know the real fontsize
        self._update_metrics()
    def __internal_repr__(self):
        return '`%s`' % self.c
    def _update_metrics(self):
        # Refresh metrics for the current fontsize/dpi and convert them
        # to TeX-style width/height/depth.
        metrics = self._metrics = self.font_output.get_metrics(
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
        if self.c == ' ':
            # A space has no ink, so its width comes from the advance.
            self.width = metrics.advance
        else:
            self.width = metrics.width
        self.height = metrics.iceberg
        # depth: the part of the glyph hanging below the baseline.
        self.depth = -(metrics.iceberg - metrics.height)
    def is_slanted(self):
        return self._metrics.slanted
    def get_kerning(self, next):
        """
        Return the amount of kerning between this and the given
        character. Called when characters are strung together into
        :class:`Hlist` to create :class:`Kern` nodes.
        """
        # Any advance beyond the ink width also behaves like a kern.
        advance = self._metrics.advance - self.width
        kern = 0.
        if isinstance(next, Char):
            kern = self.font_output.get_kern(
                self.font, self.font_class, self.c, self.fontsize,
                next.font, next.font_class, next.c, next.fontsize,
                self.dpi)
        return advance + kern
    def render(self, x, y):
        """
        Render the character to the canvas
        """
        self.font_output.render_glyph(
            x, y,
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
    def shrink(self):
        Node.shrink(self)
        if self.size < NUM_SIZE_LEVELS:
            self.fontsize *= SHRINK_FACTOR
            self.width *= SHRINK_FACTOR
            self.height *= SHRINK_FACTOR
            self.depth *= SHRINK_FACTOR
    def grow(self):
        Node.grow(self)
        self.fontsize *= GROW_FACTOR
        self.width *= GROW_FACTOR
        self.height *= GROW_FACTOR
        self.depth *= GROW_FACTOR
class Accent(Char):
    """
    A character used as an accent.  TrueType accent glyphs are already
    offset correctly from the baseline, so their dimensions come from
    the ink rectangle rather than the advance/bearing values used by
    :class:`Char`.
    """
    def _update_metrics(self):
        m = self._metrics = self.font_output.get_metrics(
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
        # Size from the ink extent; accents carry no depth of their own.
        self.width = m.xmax - m.xmin
        self.height = m.ymax - m.ymin
        self.depth = 0
    def shrink(self):
        Char.shrink(self)
        self._update_metrics()
    def grow(self):
        Char.grow(self)
        self._update_metrics()
    def render(self, x, y):
        """Draw the accent with its ink rectangle anchored at (*x*, *y*)."""
        self.font_output.render_glyph(
            x - self._metrics.xmin, y + self._metrics.ymin,
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
    """
    A list of nodes (either horizontal or vertical).
    """
    def __init__(self, elements):
        Box.__init__(self, 0., 0., 0.)
        self.shift_amount = 0. # An arbitrary offset
        self.children = elements # The child nodes of this list
        # The following parameters are set in the vpack and hpack functions
        self.glue_set = 0. # The glue setting of this list
        self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
        self.glue_order = 0 # The order of infinity (0 - 3) for the glue
    def __repr__(self):
        return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
            self.__internal_repr__(),
            self.width, self.height,
            self.depth, self.shift_amount,
            ' '.join([repr(x) for x in self.children]))
    def _determine_order(self, totals):
        """
        A helper function to determine the highest order of glue
        used by the members of this list. Used by vpack and hpack.
        """
        # Scan from the highest order down; the first non-zero total wins.
        o = 0
        for i in range(len(totals) - 1, 0, -1):
            if totals[i] != 0.0:
                o = i
                break
        return o
    def _set_glue(self, x, sign, totals, error_type):
        # Distribute the excess/deficit *x* over the glue of the highest
        # order present; warn when only finite (order 0) glue is available
        # and the box therefore cannot absorb the difference cleanly.
        o = self._determine_order(totals)
        self.glue_order = o
        self.glue_sign = sign
        if totals[o] != 0.:
            self.glue_set = x / totals[o]
        else:
            self.glue_sign = 0
            # NOTE(review): this branch sets glue_ratio while the branch
            # above sets glue_set -- confirm the asymmetry is intended.
            self.glue_ratio = 0.
        if o == 0:
            if len(self.children):
                warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
                     MathTextWarning)
    def shrink(self):
        for child in self.children:
            child.shrink()
        Box.shrink(self)
        if self.size < NUM_SIZE_LEVELS:
            self.shift_amount *= SHRINK_FACTOR
            self.glue_set *= SHRINK_FACTOR
    def grow(self):
        for child in self.children:
            child.grow()
        Box.grow(self)
        self.shift_amount *= GROW_FACTOR
        self.glue_set *= GROW_FACTOR
class Hlist(List):
    """
    A horizontal list of boxes.
    """
    def __init__(self, elements, w=0., m='additional', do_kern=True):
        # NOTE(review): the *w* and *m* parameters are accepted but not
        # forwarded to hpack() below, which is always called with its
        # defaults -- confirm whether that is intended.
        List.__init__(self, elements)
        if do_kern:
            self.kern()
        self.hpack()
    def kern(self):
        """
        Insert :class:`Kern` nodes between :class:`Char` nodes to set
        kerning. The :class:`Char` nodes themselves determine the
        amount of kerning they need (in :meth:`~Char.get_kerning`),
        and this function just creates the linked list in the correct
        way.
        """
        new_children = []
        num_children = len(self.children)
        if num_children:
            for i in range(num_children):
                elem = self.children[i]
                # Peek at the following element (None at the end) so each
                # node can compute the kern to its successor.
                if i < num_children - 1:
                    next = self.children[i + 1]
                else:
                    next = None
                new_children.append(elem)
                kerning_distance = elem.get_kerning(next)
                if kerning_distance != 0.:
                    kern = Kern(kerning_distance)
                    new_children.append(kern)
            self.children = new_children
    # This is a failed experiment to fake cross-font kerning.
    # def get_kerning(self, next):
    # if len(self.children) >= 2 and isinstance(self.children[-2], Char):
    # if isinstance(next, Char):
    # print "CASE A"
    # return self.children[-2].get_kerning(next)
    # elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
    # print "CASE B"
    # result = self.children[-2].get_kerning(next.children[0])
    # print result
    # return result
    # return 0.0
    def hpack(self, w=0., m='additional'):
        """
        The main duty of :meth:`hpack` is to compute the dimensions of
        the resulting boxes, and to adjust the glue if one of those
        dimensions is pre-specified. The computed sizes normally
        enclose all of the material inside the new box; but some items
        may stick out if negative glue is used, if the box is
        overfull, or if a ``\\vbox`` includes other boxes that have
        been shifted left.
        - *w*: specifies a width
        - *m*: is either 'exactly' or 'additional'.
        Thus, ``hpack(w, 'exactly')`` produces a box whose width is
        exactly *w*, while ``hpack(w, 'additional')`` yields a box
        whose width is the natural width plus *w*. The default values
        produce a box with the natural width.
        """
        # I don't know why these get reset in TeX. Shift_amount is pretty
        # much useless if we do.
        #self.shift_amount = 0.
        h = 0.   # running maximum height of children
        d = 0.   # running maximum depth of children
        x = 0.   # accumulated natural width
        total_stretch = [0.] * 4
        total_shrink = [0.] * 4
        for p in self.children:
            if isinstance(p, Char):
                x += p.width
                h = max(h, p.height)
                d = max(d, p.depth)
            elif isinstance(p, Box):
                x += p.width
                if not isinf(p.height) and not isinf(p.depth):
                    # A shifted box raises/lowers relative to the baseline.
                    s = getattr(p, 'shift_amount', 0.)
                    h = max(h, p.height - s)
                    d = max(d, p.depth + s)
            elif isinstance(p, Glue):
                # Glue contributes width plus stretch/shrink per order.
                glue_spec = p.glue_spec
                x += glue_spec.width
                total_stretch[glue_spec.stretch_order] += glue_spec.stretch
                total_shrink[glue_spec.shrink_order] += glue_spec.shrink
            elif isinstance(p, Kern):
                x += p.width
        self.height = h
        self.depth = d
        if m == 'additional':
            w += x
        self.width = w
        # x becomes the amount to be absorbed by stretching/shrinking glue.
        x = w - x
        if x == 0.:
            self.glue_sign = 0
            self.glue_order = 0
            self.glue_ratio = 0.
            return
        if x > 0.:
            self._set_glue(x, 1, total_stretch, "Overfull")
        else:
            self._set_glue(x, -1, total_shrink, "Underfull")
class Vlist(List):
    """
    A vertical list of boxes.
    """
    def __init__(self, elements, h=0., m='additional'):
        # NOTE(review): the *h* and *m* parameters are accepted but not
        # forwarded to vpack() below, which is always called with its
        # defaults -- confirm whether that is intended.
        List.__init__(self, elements)
        self.vpack()
    def vpack(self, h=0., m='additional', l=float(inf)):
        """
        The main duty of :meth:`vpack` is to compute the dimensions of
        the resulting boxes, and to adjust the glue if one of those
        dimensions is pre-specified.
        - *h*: specifies a height
        - *m*: is either 'exactly' or 'additional'.
        - *l*: a maximum height
        Thus, ``vpack(h, 'exactly')`` produces a box whose height is
        exactly *h*, while ``vpack(h, 'additional')`` yields a box
        whose height is the natural height plus *h*. The default
        values produce a box with the natural width.
        """
        # I don't know why these get reset in TeX. Shift_amount is pretty
        # much useless if we do.
        # self.shift_amount = 0.
        w = 0.   # running maximum width of children
        d = 0.   # depth of the most recent box (carried into x lazily)
        x = 0.   # accumulated natural height
        total_stretch = [0.] * 4
        total_shrink = [0.] * 4
        for p in self.children:
            if isinstance(p, Box):
                # Height accumulates the previous box's depth plus this
                # box's height; this box's depth is deferred until we know
                # what follows it.
                x += d + p.height
                d = p.depth
                if not isinf(p.width):
                    s = getattr(p, 'shift_amount', 0.)
                    w = max(w, p.width + s)
            elif isinstance(p, Glue):
                x += d
                d = 0.
                glue_spec = p.glue_spec
                x += glue_spec.width
                total_stretch[glue_spec.stretch_order] += glue_spec.stretch
                total_shrink[glue_spec.shrink_order] += glue_spec.shrink
            elif isinstance(p, Kern):
                x += d + p.width
                d = 0.
            elif isinstance(p, Char):
                raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
        self.width = w
        # Any depth beyond the cap *l* is folded into the height.
        if d > l:
            x += d - l
            self.depth = l
        else:
            self.depth = d
        if m == 'additional':
            h += x
        self.height = h
        # x becomes the amount to be absorbed by stretching/shrinking glue.
        x = h - x
        if x == 0:
            self.glue_sign = 0
            self.glue_order = 0
            self.glue_ratio = 0.
            return
        if x > 0.:
            self._set_glue(x, 1, total_stretch, "Overfull")
        else:
            self._set_glue(x, -1, total_shrink, "Underfull")
class Rule(Box):
    """
    A solid black rectangle with *width*, *height* and *depth* fields,
    just as in an :class:`Hlist`.  A dimension equal to inf is
    "running": its actual value is taken from the innermost enclosing
    box at render time.  The width is never running in an
    :class:`Hlist`; the height and depth are never running in a
    :class:`Vlist`.
    """
    def __init__(self, width, height, depth, state):
        Box.__init__(self, width, height, depth)
        self.font_output = state.font_output
    def render(self, x, y, w, h):
        # Delegate the fill to the rendering backend.
        self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
    """A horizontal rule whose width runs to the enclosing box."""
    def __init__(self, state):
        # Thickness tracks the font's underline so rules match the text;
        # it is split evenly above and below the baseline.
        thickness = state.font_output.get_underline_thickness(
            state.font, state.fontsize, state.dpi)
        half = thickness * 0.5
        Rule.__init__(self, inf, half, half, state)
class Vrule(Rule):
    """A vertical rule whose height and depth run to the enclosing box."""
    def __init__(self, state):
        # Width tracks the font's underline thickness.
        thickness = state.font_output.get_underline_thickness(
            state.font, state.fontsize, state.dpi)
        Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
    """
    Most of the information in this object is stored in the underlying
    :class:`GlueSpec` class, which is shared between multiple glue objects. (This
    is a memory optimization which probably doesn't matter anymore, but it's
    easier to stick to what TeX does.)
    *glue_type* may be a glue-spec name (see ``GlueSpec._types``) or a
    :class:`GlueSpec` instance; with *copy* True the spec is copied so
    later mutation does not affect the shared instance.
    """
    def __init__(self, glue_type, copy=False):
        Node.__init__(self)
        self.glue_subtype = 'normal'
        if is_string_like(glue_type):
            glue_spec = GlueSpec.factory(glue_type)
        elif isinstance(glue_type, GlueSpec):
            glue_spec = glue_type
        else:
            # Bug fix: this used to raise ArgumentError, which is not a
            # defined name anywhere (the statement would itself fail
            # with NameError); ValueError is the appropriate built-in.
            raise ValueError("glue_type must be a glue spec name or instance.")
        if copy:
            glue_spec = glue_spec.copy()
        self.glue_spec = glue_spec
    def shrink(self):
        Node.shrink(self)
        if self.size < NUM_SIZE_LEVELS:
            # Copy-on-write: never scale a shared GlueSpec in place.
            if self.glue_spec.width != 0.:
                self.glue_spec = self.glue_spec.copy()
                self.glue_spec.width *= SHRINK_FACTOR
    def grow(self):
        Node.grow(self)
        if self.glue_spec.width != 0.:
            # Copy-on-write, as in shrink().
            self.glue_spec = self.glue_spec.copy()
            self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Hlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
"""
:class:`SubSuperCluster` is a sort of hack to get around that fact
that this code do a two-pass parse like TeX. This lets us store
enough information in the hlist itself, namely the nucleus, sub-
and super-script, such that if another script follows that needs
to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
if char.height + char.depth >= target_total:
break
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected, otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
self.cur_v = baseline + rule_depth
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
self.cur_v = baseline
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
##############################################################################
# PARSER
def Error(msg):
"""
Helper class to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(msg + "\n" + s)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set(r'''
+ *
\pm \sqcap \rhd
\mp \sqcup \unlhd
\times \vee \unrhd
\div \wedge \oplus
\ast \setminus \ominus
\star \wr \otimes
\circ \diamond \oslash
\bullet \bigtriangleup \odot
\cdot \bigtriangledown \bigcirc
\cap \triangleleft \dagger
\cup \triangleright \ddagger
\uplus \lhd \amalg'''.split())
_relation_symbols = set(r'''
= < > :
\leq \geq \equiv \models
\prec \succ \sim \perp
\preceq \succeq \simeq \mid
\ll \gg \asymp \parallel
\subset \supset \approx \bowtie
\subseteq \supseteq \cong \Join
\sqsubset \sqsupset \neq \smile
\sqsubseteq \sqsupseteq \doteq \frown
\in \ni \propto
\vdash \dashv'''.split())
_arrow_symbols = set(r'''
\leftarrow \longleftarrow \uparrow
\Leftarrow \Longleftarrow \Uparrow
\rightarrow \longrightarrow \downarrow
\Rightarrow \Longrightarrow \Downarrow
\leftrightarrow \longleftrightarrow \updownarrow
\Leftrightarrow \Longleftrightarrow \Updownarrow
\mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow
\leftharpoonup \rightharpoonup \swarrow
\leftharpoondown \rightharpoondown \nwarrow
\rightleftharpoons \leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambiDelim = set(r"""
| \| / \backslash \uparrow \downarrow \updownarrow \Uparrow
\Downarrow \Updownarrow .""".split())
_leftDelim = set(r"( [ { < \lfloor \langle \lceil".split())
_rightDelim = set(r") ] } > \rfloor \rangle \rceil".split())
def __init__(self):
# All forward declarations are here
font = Forward().setParseAction(self.font).setName("font")
latexfont = Forward()
subsuper = Forward().setParseAction(self.subsuperscript).setName("subsuper")
placeable = Forward().setName("placeable")
simple = Forward().setName("simple")
autoDelim = Forward().setParseAction(self.auto_sized_delimiter)
self._expression = Forward().setParseAction(self.finish).setName("finish")
float = Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
lbrace = Literal('{').suppress()
rbrace = Literal('}').suppress()
start_group = (Optional(latexfont) - lbrace)
start_group.setParseAction(self.start_group)
end_group = rbrace.copy()
end_group.setParseAction(self.end_group)
bslash = Literal('\\')
accent = oneOf(self._accent_map.keys() +
list(self._wide_accents))
function = oneOf(list(self._function_names))
fontname = oneOf(list(self._fontnames))
latex2efont = oneOf(['math' + x for x in self._fontnames])
space =(FollowedBy(bslash)
+ oneOf([r'\ ',
r'\/',
r'\,',
r'\;',
r'\quad',
r'\qquad',
r'\!'])
).setParseAction(self.space).setName('space')
customspace =(Literal(r'\hspace')
- (( lbrace
- float
- rbrace
) | Error(r"Expected \hspace{n}"))
).setParseAction(self.customspace).setName('customspace')
unicode_range = u"\U00000080-\U0001ffff"
symbol =(Regex(UR"([a-zA-Z0-9 +\-*/<>=:,.;!'@()\[\]|%s])|(\\[%%${}\[\]_|])" % unicode_range)
| (Combine(
bslash
+ oneOf(tex2uni.keys())
) + FollowedBy(Regex("[^a-zA-Z]")))
).setParseAction(self.symbol).leaveWhitespace()
c_over_c =(Suppress(bslash)
+ oneOf(self._char_over_chars.keys())
).setParseAction(self.char_over_chars)
accent = Group(
Suppress(bslash)
+ accent
- placeable
).setParseAction(self.accent).setName("accent")
function =(Suppress(bslash)
+ function
).setParseAction(self.function).setName("function")
group = Group(
start_group
+ ZeroOrMore(
autoDelim
^ simple)
- end_group
).setParseAction(self.group).setName("group")
font <<(Suppress(bslash)
+ fontname)
latexfont <<(Suppress(bslash)
+ latex2efont)
frac = Group(
Suppress(Literal(r"\frac"))
+ ((group + group)
| Error(r"Expected \frac{num}{den}"))
).setParseAction(self.frac).setName("frac")
sqrt = Group(
Suppress(Literal(r"\sqrt"))
+ Optional(
Suppress(Literal("["))
- Regex("[0-9]+")
- Suppress(Literal("]")),
default = None
)
+ (group | Error("Expected \sqrt{value}"))
).setParseAction(self.sqrt).setName("sqrt")
placeable <<(accent
^ function
^ (c_over_c | symbol)
^ group
^ frac
^ sqrt
)
simple <<(space
| customspace
| font
| subsuper
)
subsuperop = oneOf(["_", "^"])
subsuper << Group(
( Optional(placeable)
+ OneOrMore(
subsuperop
- placeable
)
)
| placeable
)
ambiDelim = oneOf(list(self._ambiDelim))
leftDelim = oneOf(list(self._leftDelim))
rightDelim = oneOf(list(self._rightDelim))
autoDelim <<(Suppress(Literal(r"\left"))
+ ((leftDelim | ambiDelim) | Error("Expected a delimiter"))
+ Group(
autoDelim
^ OneOrMore(simple))
+ Suppress(Literal(r"\right"))
+ ((rightDelim | ambiDelim) | Error("Expected a delimiter"))
)
math = OneOrMore(
autoDelim
^ simple
).setParseAction(self.math).setName("math")
math_delim = ~bslash + Literal('$')
non_math = Regex(r"(?:(?:\\[$])|[^$])*"
).setParseAction(self.non_math).setName("non_math").leaveWhitespace()
self._expression << (
non_math
+ ZeroOrMore(
Suppress(math_delim)
+ Optional(math)
+ (Suppress(math_delim)
| Error("Expected end of math '$'"))
+ non_math
)
) + StringEnd()
self.clear()
def clear(self):
"""
Clear any state before parsing.
"""
self._expr = None
self._state_stack = None
self._em_width_cache = {}
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
try:
self._expression.parseString(s)
except ParseException, err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
str(err)]))
return self._expr
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('it', 'rm', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def finish(self, s, loc, toks):
#~ print "finish", toks
self._expr = Hlist(toks)
return [self._expr]
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state()) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set font to 'it'
self.push_state()
self.get_state().font = 'it'
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, 'it', 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\ ' : 0.3,
r'\,' : 0.4,
r'\;' : 0.8,
r'\quad' : 1.6,
r'\qquad' : 3.2,
r'\!' : -0.4,
r'\/' : 0.4 }
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[1]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException("Unknown symbol: %s" % c)
if c in self._spaced_symbols:
return [Hlist( [self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = False)]
elif c in self._punctuation_symbols:
return [Hlist( [char,
self._make_space(0.2)] ,
do_kern = False)]
return [char]
_char_over_chars = {
# The first 2 entires in the tuple are (font, char, sizescale) for
# the two symbols under and over. The third element is the space
# (in multiples of underline height)
r'AA' : ( ('rm', 'A', 1.0), (None, '\circ', 0.5), 0.0),
}
def char_over_chars(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'ddot' : r'\combiningdiaeresis',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent'
}
_wide_accents = set(r"widehat widetilde".split())
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent = Accent(self._accent_map[accent], state)
centered = HCentered([accent])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def subsuperscript(self, s, loc, toks):
assert(len(toks)==1)
# print 'subsuperscript', toks
nucleus = None
sub = None
super = None
if len(toks[0]) == 1:
return toks[0].asList()
elif len(toks[0]) == 2:
op, next = toks[0]
nucleus = Hbox(0.0)
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 3:
nucleus, op, next = toks[0]
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 5:
nucleus, op1, next1, op2, next2 = toks[0]
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height + hlist.depth + rule_thickness * 2.0
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth * 0.5
result = Hlist([vlist])
return [result]
# Handle regular sub/superscripts
shift_up = nucleus.height - SUBDROP * xHeight
if self.is_dropsub(nucleus):
shift_down = nucleus.depth + SUBDROP * xHeight
else:
shift_down = SUBDROP * xHeight
if super is None:
# node757
sub.shrink()
x = Hlist([sub])
# x.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1)
clr = x.height - (abs(xHeight * 4.0) / 5.0)
shift_down = max(shift_down, clr)
x.shift_amount = shift_down
else:
super.shrink()
x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
# x.width += SCRIPT_SPACE * xHeight
clr = SUP1 * xHeight
shift_up = max(shift_up, clr)
clr = x.depth + (abs(xHeight) / 4.0)
shift_up = max(shift_up, clr)
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
sub.shrink()
y = Hlist([sub])
# y.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
shift_down += clr
if self.is_slanted(nucleus):
x.shift_amount = DELTA * (shift_up + shift_down)
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
result = Hlist([nucleus, x])
return [result]
def frac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width) + thickness * 10.
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state), # rule
Vbox(0, thickness * 4.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, 'it', '=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
hlist = Hlist([vlist, Hbox(thickness * 2.)])
return [hlist]
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
depth, 'exactly')
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def auto_sized_delimiter(self, s, loc, toks):
#~ print "auto_sized_delimiter", toks
front, middle, back = toks
state = self.get_state()
height = max([x.height for x in middle])
depth = max([x.depth for x in middle])
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state))
parts.extend(middle.asList())
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state))
hlist = Hlist(parts)
return hlist
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'stix', "
"'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
# Free up the transient data structures
self._parser.clear()
# Fix cyclical references
font_output.destroy()
font_output.mathtext_backend.fonts_object = None
font_output.mathtext_backend = None
return result
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b = mcolors.colorConverter.to_rgb(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:,:,0] = int(255*r)
RGBA[:,:,1] = int(255*g)
RGBA[:,:,2] = int(255*b)
RGBA[:,:,3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
Writes a tex expression to a PNG file.
Returns the offset of the baseline from the bottom of the
image in pixels.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
numrows, numcols, tmp = rgba.shape
_png.write_png(rgba.tostring(), numcols, numrows, filename)
return depth
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
| agpl-3.0 |
andaag/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    # Build the numpy.distutils configuration for the sklearn.cluster
    # subpackage: three compiled extensions, with _k_means linked against
    # the CBLAS implementation detected by get_blas_info().
    from numpy.distutils.misc_util import Configuration
    cblas_libs, blas_info = get_blas_info()
    libraries = []
    if os.name == 'posix':
        # Link libm explicitly on POSIX platforms.
        cblas_libs.append('m')
        libraries.append('m')
    config = Configuration('cluster', parent_package, top_path)
    config.add_extension('_dbscan_inner',
                         sources=['_dbscan_inner.cpp'],
                         include_dirs=[numpy.get_include()],
                         language="c++")
    config.add_extension('_hierarchical',
                         sources=['_hierarchical.cpp'],
                         language="c++",
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    config.add_extension(
        '_k_means',
        libraries=cblas_libs,
        sources=['_k_means.c'],
        # NOTE(review): the last include_dirs entry is itself a list;
        # numpy.distutils appears to tolerate the nesting -- confirm
        # before flattening it.
        include_dirs=[join('..', 'src', 'cblas'),
                      numpy.get_include(),
                      blas_info.pop('include_dirs', [])],
        extra_compile_args=blas_info.pop('extra_compile_args', []),
        **blas_info
    )
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone via numpy.distutils.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
tdhopper/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
    # Minimal estimator with one numeric and one arbitrary-object
    # parameter; used by the clone()/repr()/get_params() tests below.
    def __init__(self, l1=0, empty=None):
        self.l1 = l1
        self.empty = empty
class K(BaseEstimator):
    # Leaf estimator with two plain parameters; nested inside T to
    # exercise deep get_params()/set_params().
    def __init__(self, c=None, d=None):
        self.c = c
        self.d = d
class T(BaseEstimator):
    # Estimator whose parameters are themselves estimators (K instances),
    # exercising the nested 'a__d'-style parameter access.
    def __init__(self, a=None, b=None):
        self.a = a
        self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
    """Estimator whose ``b`` parameter is a deprecated alias for ``a``.

    Used to check that deprecated attributes are excluded from
    ``get_params()``.
    """

    def __init__(self, a=None, b=None):
        self.a = a
        if b is not None:
            # BUG FIX: the original constructed a DeprecationWarning
            # instance but never emitted it, so callers were never warned.
            import warnings
            warnings.warn("b is deprecated and renamed 'a'",
                          DeprecationWarning)
            self.a = b

    @property
    @deprecated("Parameter 'b' is deprecated and renamed to 'a'")
    def b(self):
        # NOTE(review): self._b is never assigned anywhere, so reading this
        # property raises AttributeError after the deprecation warning; the
        # tests only rely on 'b' being filtered out of get_params(), never
        # on its value.
        return self._b
class Buggy(BaseEstimator):
    " A buggy estimator that does not set its parameters right. "
    # __init__ discards `a` and hard-codes self.a = 1, so get_params()
    # cannot round-trip the constructor arguments; clone() must detect
    # this and raise (see test_clone_buggy).
    def __init__(self, a=None):
        self.a = 1
class NoEstimator(object):
    """A duck-typed object with fit/predict but no get_params().

    It is not a BaseEstimator and exposes no parameters, so clone()
    must reject it with a TypeError (see test_clone_buggy).
    """

    def __init__(self):
        pass

    def fit(self, X=None, y=None):
        # Fluent no-op: return the instance unchanged.
        return self

    def predict(self, X=None):
        # Nothing is ever fit, so there is nothing to predict.
        return None
class VargEstimator(BaseEstimator):
    """Sklearn estimators shouldn't have vargs."""
    # *vargs makes the constructor signature non-introspectable, so
    # clone() must raise RuntimeError for this class.
    def __init__(self, *vargs):
        pass
#############################################################################
# The tests
def test_clone():
    # clone() must produce a distinct object whose parameters compare
    # equal to the source estimator's.
    from sklearn.feature_selection import SelectFpr, f_classif

    original = SelectFpr(f_classif, alpha=0.1)
    cloned = clone(original)
    assert_true(original is not cloned)
    assert_equal(original.get_params(), cloned.get_params())

    # Array-valued parameters cannot be compared with '==', but cloning
    # must still yield a distinct object.
    original = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    cloned = clone(original)
    assert_true(original is not cloned)
def test_clone_2():
    # Tests that clone doesn't copy everything.
    # We first create an estimator, give it an own attribute, and
    # make a copy of its original state. Then we check that the copy doesn't
    # have the specific attribute we manually added to the initial estimator.
    from sklearn.feature_selection import SelectFpr, f_classif
    selector = SelectFpr(f_classif, alpha=0.1)
    selector.own_attribute = "test"
    new_selector = clone(selector)
    # Only constructor parameters survive cloning, not ad-hoc attributes.
    assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
    # Check that clone raises an error on buggy estimators.
    # Buggy.__init__ ignores its argument, so the cloned parameters do not
    # round-trip and clone() must fail loudly.
    buggy = Buggy()
    buggy.a = 2
    assert_raises(RuntimeError, clone, buggy)

    # An object without get_params() cannot be cloned at all.
    no_estimator = NoEstimator()
    assert_raises(TypeError, clone, no_estimator)

    # A *vargs signature makes parameter introspection impossible.
    varg_est = VargEstimator()
    assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
    # Regression test for cloning estimators with empty arrays
    clf = MyEstimator(empty=np.array([]))
    clf2 = clone(clf)
    assert_array_equal(clf.empty, clf2.empty)

    # Sparse-matrix parameters must survive cloning as well.
    clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
    clf2 = clone(clf)
    assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
    # Regression test for cloning estimators with default parameter as np.nan
    # (nan != nan, so an equality-based parameter check would misbehave).
    clf = MyEstimator(empty=np.nan)
    clf2 = clone(clf)
    assert_true(clf.empty is clf2.empty)
def test_repr():
    # Smoke test the repr of the base estimator.
    my_estimator = MyEstimator()
    repr(my_estimator)
    test = T(K(), K())
    # Nested estimators appear fully expanded in the repr.
    assert_equal(
        repr(test),
        "T(a=K(c=None, d=None), b=K(c=None, d=None))"
    )
    # Very long parameter values are abbreviated to a bounded-length repr.
    some_est = T(a=["long_params"] * 1000)
    assert_equal(len(repr(some_est)), 415)
def test_str():
    # Smoke test the str of the base estimator
    my_estimator = MyEstimator()
    str(my_estimator)
def test_get_params():
    # Nested parameters are exposed with the '<name>__<param>' convention
    # only when deep=True.
    test = T(K(), K())

    assert_true('a__d' in test.get_params(deep=True))
    assert_true('a__d' not in test.get_params(deep=False))

    test.set_params(a__d=2)
    assert_true(test.a.d == 2)
    # Unknown nested parameters must be rejected.
    assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
    # A deprecated attribute ('b') must never show up in get_params(),
    # while the live parameter ('a') is reported at every depth.
    est = DeprecatedAttributeEstimator(a=1)
    for params in (est.get_params(),
                   est.get_params(deep=True),
                   est.get_params(deep=False)):
        assert_true('a' in params)
        assert_true('b' not in params)
def test_is_classifier():
    # is_classifier must see through meta-estimators (grid search,
    # pipelines) down to the final estimator.
    svc = SVC()
    assert_true(is_classifier(svc))
    assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
    assert_true(is_classifier(Pipeline([('svc', svc)])))
    assert_true(is_classifier(Pipeline([('svc_cv',
                              GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
    # test nested estimator parameter setting
    clf = Pipeline([("svc", SVC())])
    # non-existing parameter in svc
    assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
    # non-existing parameter of pipeline
    assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
    # we don't currently catch if the things in pipeline are estimators
    # bad_pipeline = Pipeline([("bad", NoEstimator())])
    # assert_raises(AttributeError, bad_pipeline.set_params,
    #               bad__stupid_param=True)
def test_score_sample_weight():
    # score() must honor sample_weight: a weighted score on the training
    # data should differ from the unweighted one.
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.tree import DecisionTreeRegressor
    from sklearn import datasets

    rng = np.random.RandomState(0)

    # test both ClassifierMixin and RegressorMixin
    estimators = [DecisionTreeClassifier(max_depth=2),
                  DecisionTreeRegressor(max_depth=2)]
    sets = [datasets.load_iris(),
            datasets.load_boston()]

    for est, ds in zip(estimators, sets):
        est.fit(ds.data, ds.target)
        # generate random sample weights
        sample_weight = rng.randint(1, 10, size=len(ds.target))
        # check that the score with and without sample weights are different
        assert_not_equal(est.score(ds.data, ds.target),
                         est.score(ds.data, ds.target,
                                   sample_weight=sample_weight),
                         msg="Unweighted and weighted scores "
                             "are unexpectedly equal")
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/pylab_examples/finance_demo.py | 6 | 1134 | #!/usr/bin/env python
from pylab import *
from matplotlib.dates import DateFormatter, WeekdayLocator, HourLocator, \
DayLocator, MONDAY
from matplotlib.finance import quotes_historical_yahoo, candlestick,\
plot_day_summary, candlestick2
# Fetch ~10 weeks of INTC daily quotes from Yahoo Finance and draw them as
# a candlestick chart with Monday major ticks and daily minor ticks.
# NOTE(review): quotes_historical_yahoo performs a network request -- the
# script exits silently (SystemExit) if no data comes back.

# (Year, month, day) tuples suffice as args for quotes_historical_yahoo
date1 = ( 2004, 2, 1)
date2 = ( 2004, 4, 12 )

mondays = WeekdayLocator(MONDAY)        # major ticks on the mondays
alldays = DayLocator()                  # minor ticks on the days
weekFormatter = DateFormatter('%b %d')  # Eg, Jan 12
dayFormatter = DateFormatter('%d')      # Eg, 12

quotes = quotes_historical_yahoo('INTC', date1, date2)
if len(quotes) == 0:
    raise SystemExit

fig = figure()
fig.subplots_adjust(bottom=0.2)  # leave room for rotated date labels
ax = fig.add_subplot(111)
ax.xaxis.set_major_locator(mondays)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(weekFormatter)
#ax.xaxis.set_minor_formatter(dayFormatter)

#plot_day_summary(ax, quotes, ticksize=3)
candlestick(ax, quotes, width=0.6)

ax.xaxis_date()
ax.autoscale_view()
setp( gca().get_xticklabels(), rotation=45, horizontalalignment='right')

show()
| gpl-2.0 |
sarahgrogan/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
    """Independently rescale the rows and columns of ``X``.

    Returns the normalized matrix together with the row and column
    scaling factors that were applied.
    """
    X = make_nonnegative(X)

    def inv_sqrt_sums(axis):
        # 1/sqrt of the marginal sums; zero-sum slices produce NaN, which
        # is mapped to 0 so empty rows/columns stay empty.
        d = np.asarray(1.0 / np.sqrt(X.sum(axis=axis))).squeeze()
        return np.where(np.isnan(d), 0, d)

    row_diag = inv_sqrt_sums(1)
    col_diag = inv_sqrt_sums(0)

    if issparse(X):
        n_rows, n_cols = X.shape
        left = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
        right = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
        normalized = left * X * right
    else:
        normalized = row_diag[:, np.newaxis] * X * col_diag
    return normalized, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
    """Normalize rows and columns of ``X`` simultaneously so that all
    rows sum to one constant and all columns sum to a different
    constant.

    Iterates _scale_normalize until successive iterates differ by less
    than ``tol`` (in Frobenius norm) or ``max_iter`` is reached.
    """
    # According to the paper, this can also be done more efficiently with
    # deviation reduction and balancing algorithms.
    X = make_nonnegative(X)
    X_scaled = X
    dist = None
    for _ in range(max_iter):
        X_new, _, _ = _scale_normalize(X_scaled)
        if issparse(X):
            # BUG FIX: compare successive iterates (X_scaled vs. X_new).
            # The original compared against the *input* X, so dist was 0
            # on the first iteration (X_scaled is X) and the loop always
            # exited after a single normalization pass.
            dist = norm(X_scaled.data - X_new.data)
        else:
            dist = norm(X_scaled - X_new)
        X_scaled = X_new
        if dist is not None and dist < tol:
            break
    return X_scaled
def _log_normalize(X):
    """Normalize ``X`` according to Kluger's log-interactions scheme."""
    X = make_nonnegative(X, min_value=1)
    if issparse(X):
        raise ValueError("Cannot compute log of a sparse matrix,"
                         " because log(x) diverges to -infinity as x"
                         " goes to 0.")
    L = np.log(X)
    # Double-center the log matrix: subtract row and column means, then
    # add back the grand mean.
    return L - L.mean(axis=1)[:, np.newaxis] - L.mean(axis=0) + L.mean()
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
                                      BiclusterMixin)):
    """Base class for spectral biclustering."""

    @abstractmethod
    def __init__(self, n_clusters=3, svd_method="randomized",
                 n_svd_vecs=None, mini_batch=False, init="k-means++",
                 n_init=10, n_jobs=1, random_state=None):
        # sklearn convention: __init__ only stores hyper-parameters;
        # validation happens in _check_parameters (called from fit).
        self.n_clusters = n_clusters
        self.svd_method = svd_method
        self.n_svd_vecs = n_svd_vecs
        self.mini_batch = mini_batch
        self.init = init
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.random_state = random_state

    def _check_parameters(self):
        # Validate svd_method; subclasses extend this with their own checks.
        legal_svd_methods = ('randomized', 'arpack')
        if self.svd_method not in legal_svd_methods:
            raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
                             " one of {1}.".format(self.svd_method,
                                                   legal_svd_methods))

    def fit(self, X):
        """Creates a biclustering for X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        """
        # Note: fit does not return self here (unlike the usual sklearn
        # estimator contract), so calls on it cannot be chained.
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        self._check_parameters()
        self._fit(X)

    def _svd(self, array, n_components, n_discard):
        """Returns first `n_components` left and right singular
        vectors u and v, discarding the first `n_discard`.
        """
        if self.svd_method == 'randomized':
            kwargs = {}
            if self.n_svd_vecs is not None:
                kwargs['n_oversamples'] = self.n_svd_vecs
            u, _, vt = randomized_svd(array, n_components,
                                      random_state=self.random_state,
                                      **kwargs)
        elif self.svd_method == 'arpack':
            u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
            if np.any(np.isnan(vt)):
                # some eigenvalues of A * A.T are negative, causing
                # sqrt() to be np.nan. This causes some vectors in vt
                # to be np.nan.
                _, v = eigsh(safe_sparse_dot(array.T, array),
                             ncv=self.n_svd_vecs)
                vt = v.T
            if np.any(np.isnan(u)):
                # Same fallback for the left singular vectors.
                _, u = eigsh(safe_sparse_dot(array, array.T),
                             ncv=self.n_svd_vecs)
        assert_all_finite(u)
        assert_all_finite(vt)
        # Discard the leading n_discard vectors (the trivial component of
        # the normalized matrix).
        u = u[:, n_discard:]
        vt = vt[n_discard:]
        return u, vt.T

    def _k_means(self, data, n_clusters):
        # Cluster the rows of `data`; returns (centroids, labels).
        if self.mini_batch:
            model = MiniBatchKMeans(n_clusters,
                                    init=self.init,
                                    n_init=self.n_init,
                                    random_state=self.random_state)
        else:
            model = KMeans(n_clusters, init=self.init,
                           n_init=self.n_init, n_jobs=self.n_jobs,
                           random_state=self.random_state)
        model.fit(data)
        centroid = model.cluster_centers_
        labels = model.labels_
        return centroid, labels
class SpectralCoclustering(BaseSpectral):
    """Spectral Co-Clustering algorithm (Dhillon, 2001).

    Clusters rows and columns of an array `X` to solve the relaxed
    normalized cut of the bipartite graph created from `X` as follows:
    the edge between row vertex `i` and column vertex `j` has weight
    `X[i, j]`.

    The resulting bicluster structure is block-diagonal, since each
    row and each column belongs to exactly one bicluster.

    Supports sparse matrices, as long as they are nonnegative.

    Read more in the :ref:`User Guide <spectral_coclustering>`.

    Parameters
    ----------
    n_clusters : integer, optional, default: 3
        The number of biclusters to find.

    svd_method : string, optional, default: 'randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', use
        :func:`sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', use
        :func:`sklearn.utils.arpack.svds`, which is more accurate, but
        possibly slower in some cases.

    n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized`.

    mini_batch : bool, optional, default: False
        Whether to use mini-batch k-means, which is faster but may get
        different results.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.

    n_init : int, optional, default: 10
        Number of random initializations that are tried with the
        k-means algorithm.

        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used by the K-Means
        initialization.

    Attributes
    ----------
    rows_ : array-like, shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.

    columns_ : array-like, shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.

    row_labels_ : array-like, shape (n_rows,)
        The bicluster label of each row.

    column_labels_ : array-like, shape (n_cols,)
        The bicluster label of each column.

    References
    ----------

    * Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
      bipartite spectral graph partitioning
      <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.

    """
    def __init__(self, n_clusters=3, svd_method='randomized',
                 n_svd_vecs=None, mini_batch=False, init='k-means++',
                 n_init=10, n_jobs=1, random_state=None):
        super(SpectralCoclustering, self).__init__(n_clusters,
                                                   svd_method,
                                                   n_svd_vecs,
                                                   mini_batch,
                                                   init,
                                                   n_init,
                                                   n_jobs,
                                                   random_state)

    def _fit(self, X):
        # Scale-normalize, embed rows and columns jointly using the top
        # singular vectors, then k-means the joint embedding.
        normalized_data, row_diag, col_diag = _scale_normalize(X)
        n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
        u, v = self._svd(normalized_data, n_sv, n_discard=1)
        z = np.vstack((row_diag[:, np.newaxis] * u,
                       col_diag[:, np.newaxis] * v))

        _, labels = self._k_means(z, self.n_clusters)

        n_rows = X.shape[0]
        self.row_labels_ = labels[:n_rows]
        self.column_labels_ = labels[n_rows:]

        # BUG FIX: np.vstack requires a sequence of arrays; passing a
        # generator is deprecated since NumPy 1.16 and an error in recent
        # releases, so build explicit lists.
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
    """Spectral biclustering (Kluger, 2003).

    Partitions rows and columns under the assumption that the data has
    an underlying checkerboard structure. For instance, if there are
    two row partitions and three column partitions, each row will
    belong to three biclusters, and each column will belong to two
    biclusters. The outer product of the corresponding row and column
    label vectors gives this checkerboard structure.

    Read more in the :ref:`User Guide <spectral_biclustering>`.

    Parameters
    ----------
    n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
        The number of row and column clusters in the checkerboard
        structure.

    method : string, optional, default: 'bistochastic'
        Method of normalizing and converting singular vectors into
        biclusters. May be one of 'scale', 'bistochastic', or 'log'.
        The authors recommend using 'log'. If the data is sparse,
        however, log normalization will not work, which is why the
        default is 'bistochastic'. CAUTION: if `method='log'`, the
        data must not be sparse.

    n_components : integer, optional, default: 6
        Number of singular vectors to check.

    n_best : integer, optional, default: 3
        Number of best singular vectors to which to project the data
        for clustering.

    svd_method : string, optional, default: 'randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', uses
        `sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', uses
        `sklearn.utils.arpack.svds`, which is more accurate, but
        possibly slower in some cases.

    n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized`.

    mini_batch : bool, optional, default: False
        Whether to use mini-batch k-means, which is faster but may get
        different results.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.

    n_init : int, optional, default: 10
        Number of random initializations that are tried with the
        k-means algorithm.

        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used by the K-Means
        initialization.

    Attributes
    ----------
    rows_ : array-like, shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.

    columns_ : array-like, shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.

    row_labels_ : array-like, shape (n_rows,)
        Row partition labels.

    column_labels_ : array-like, shape (n_cols,)
        Column partition labels.

    References
    ----------

    * Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
      data: coclustering genes and conditions
      <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.

    """
    def __init__(self, n_clusters=3, method='bistochastic',
                 n_components=6, n_best=3, svd_method='randomized',
                 n_svd_vecs=None, mini_batch=False, init='k-means++',
                 n_init=10, n_jobs=1, random_state=None):
        super(SpectralBiclustering, self).__init__(n_clusters,
                                                   svd_method,
                                                   n_svd_vecs,
                                                   mini_batch,
                                                   init,
                                                   n_init,
                                                   n_jobs,
                                                   random_state)
        self.method = method
        self.n_components = n_components
        self.n_best = n_best

    def _check_parameters(self):
        super(SpectralBiclustering, self)._check_parameters()
        legal_methods = ('bistochastic', 'scale', 'log')
        if self.method not in legal_methods:
            raise ValueError("Unknown method: '{0}'. method must be"
                             " one of {1}.".format(self.method, legal_methods))
        try:
            int(self.n_clusters)
        except TypeError:
            try:
                r, c = self.n_clusters
                int(r)
                int(c)
            except (ValueError, TypeError):
                # BUG FIX: the message template was never formatted, so the
                # literal '{}' was shown; substitute the offending value.
                raise ValueError("Incorrect parameter n_clusters has value:"
                                 " {}. It should either be a single integer"
                                 " or an iterable with two integers:"
                                 " (n_row_clusters, n_column_clusters)"
                                 .format(self.n_clusters))
        if self.n_components < 1:
            raise ValueError("Parameter n_components must be greater than 0,"
                             " but its value is {}".format(self.n_components))
        if self.n_best < 1:
            raise ValueError("Parameter n_best must be greater than 0,"
                             " but its value is {}".format(self.n_best))
        if self.n_best > self.n_components:
            raise ValueError("n_best cannot be larger than"
                             " n_components, but {} > {}"
                             "".format(self.n_best, self.n_components))

    def _fit(self, X):
        n_sv = self.n_components
        if self.method == 'bistochastic':
            normalized_data = _bistochastic_normalize(X)
            n_sv += 1
        elif self.method == 'scale':
            normalized_data, _, _ = _scale_normalize(X)
            n_sv += 1
        elif self.method == 'log':
            normalized_data = _log_normalize(X)
        # 'log' keeps the first singular vector; the other normalizations
        # discard the trivial one.
        n_discard = 0 if self.method == 'log' else 1
        u, v = self._svd(normalized_data, n_sv, n_discard)
        ut = u.T
        vt = v.T

        try:
            n_row_clusters, n_col_clusters = self.n_clusters
        except TypeError:
            n_row_clusters = n_col_clusters = self.n_clusters

        best_ut = self._fit_best_piecewise(ut, self.n_best,
                                           n_row_clusters)

        best_vt = self._fit_best_piecewise(vt, self.n_best,
                                           n_col_clusters)

        self.row_labels_ = self._project_and_cluster(X, best_vt.T,
                                                     n_row_clusters)

        self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
                                                        n_col_clusters)

        # BUG FIX: np.vstack requires a sequence of arrays; passing a
        # generator is deprecated since NumPy 1.16 and an error in recent
        # releases, so build explicit lists.
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])

    def _fit_best_piecewise(self, vectors, n_best, n_clusters):
        """Find the ``n_best`` vectors that are best approximated by piecewise
        constant vectors.

        The piecewise vectors are found by k-means; the best is chosen
        according to Euclidean distance.

        """
        def make_piecewise(v):
            centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
            return centroid[labels].ravel()
        piecewise_vectors = np.apply_along_axis(make_piecewise,
                                                axis=1, arr=vectors)
        dists = np.apply_along_axis(norm, axis=1,
                                    arr=(vectors - piecewise_vectors))
        result = vectors[np.argsort(dists)[:n_best]]
        return result

    def _project_and_cluster(self, data, vectors, n_clusters):
        """Project ``data`` to ``vectors`` and cluster the result."""
        projected = safe_sparse_dot(data, vectors)
        _, labels = self._k_means(projected, n_clusters)
        return labels
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.