repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
schets/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
    """Return the L1 unit-ball contour value |1 - |x|| for every x in xs."""
    values = []
    for point in xs:
        values.append(np.sqrt((1 - np.sqrt(point ** 2.0)) ** 2.0))
    return np.array(values)
def l2(xs):
    """Return the L2 unit-circle contour value sqrt(1 - x^2) for every x in xs."""
    values = []
    for point in xs:
        values.append(np.sqrt(1.0 - point ** 2.0))
    return np.array(values)
def el(xs, z):
    """Contour of the elastic-net penalty for mixing parameter ``z``.

    Presumably the closed-form (quadratic-formula) solution of the elastic-net
    unit-level-set equation for the second coordinate given the first — TODO
    confirm the derivation.  Undefined at z == 0.5 (the denominator 2 - 4z
    vanishes), which is why the caller uses alpha = 0.501.
    """
    return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
                      (4 * z ** 2
                       - 8 * x * z ** 2
                       + 8 * x ** 2 * z ** 2
                       - 16 * x ** 2 * z ** 3
                       + 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
                      - 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
    """Draw black axis lines through the origin spanning [-ext, ext]."""
    span = [-ext, ext]
    origin = [0, 0]
    plt.plot(span, origin, "k-")
    plt.plot(origin, span, "k-")
# Sample the first quadrant only; each curve is mirrored into the other three
# quadrants by sign flips below.
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would divide by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
kevinhikali/ml_kevin | bottom/logistic_regression.py | 1 | 1151 | # -*- coding: utf-8 -*-
"""
@author: kevinhikali
@email: hmingwei@gmail.com
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from math import exp
# global variable: ground-truth parameters [intercept, slope] used to
# generate the linearly separable labels below
SampleTh = np.array([[2],
                     [5]])
# function
def h(th, x):
    """Linear hypothesis: return th^T x (dot product of parameters and features).

    Note: the original declared ``global SampleTh`` here, but the function
    neither reads nor assigns that global, so the dead declaration was removed.
    """
    return np.dot(th.transpose(), x)
nSample = 40
nParam = 2
# random features in [0, 1); row 0 is then overwritten with the bias column
SampleX = abs(np.random.rand(nParam, nSample))
# if bias required
SampleX[0, :] = np.ones(nSample)
# decision-boundary value th^T x for every sample
BorderV = np.dot(SampleTh.transpose(), SampleX)[0]
# noisy observations scattered around 4.5
SampleV = 4.5 * np.ones(nSample) + 3.5 * (np.random.rand(nSample) - 0.5 * np.ones(nSample))
SampleY = np.zeros(nSample)
# label: 1 when the observation falls at/below the boundary, else 0
for i in range(nSample):
    if SampleV[i] > BorderV[i]:
        SampleY[i] = 0
    else:
        SampleY[i] = 1
th = np.zeros(nParam)  # "trained" parameters — never actually updated in this script
x = np.zeros(nParam)
y = np.zeros(1)
fig = plt.figure()
plt_x = 0
for i in range(nSample):
    global SampleTh  # NOTE(review): no-op — 'global' only has effect inside a function body
    plt_x = SampleX[1, i]
    x = SampleX[:, i]
    # origin
    y = h(SampleTh, x)
    plot, = plt.plot(plt_x, y, 'go')
    # sample
    y = SampleY[i]
    plot, = plt.plot(plt_x, y, 'bo')
    # trained
    y = h(th, x)
    plot, = plt.plot(plt_x, y, 'ro')
plt.show()
| gpl-3.0 |
brentp/vcfanno | scripts/paper/parallelization-figure.py | 2 | 2106 | import toolshed as ts
# map long query dataset names to short legend labels
lookup = {'ALL.wgs.phase3_shapeit2_mvncall_integrated_v5a.20130502.sites': '1000G',
          'ExAC.r0.3.sites.vep': 'ExAC'}
data = {'1000G': [], 'ExAC': []}
# Expected input format (columns: method, procs, time, query), e.g.:
"""
method procs time query
var 20 888.29 seconds ALL.wgs.phase3_shapeit2_mvncall_integrated_v5a.20130502.sites
var 19 897.02 seconds ALL.wgs.phase3_shapeit2_mvncall_integrated_v5a.20130502.sites
var 18 895.35 seconds ALL.wgs.phase3_shapeit2_mvncall_integrated_v5a.20130502.sites
var 17 909.24 seconds ALL.wgs.phase3_shapeit2_mvncall_integrated_v5a.20130502.sites
var 16 916.43 seconds ALL.wgs.phase3_shapeit2_mvncall_integrated_v5a.20130502.sites
var 15 945.61 seconds ALL.wgs.phase3_shapeit2_mvncall_integrated_v5a.20130502.sites
var 14 981.14 seconds ALL.wgs.phase3_shapeit2_mvncall_integrated_v5a.20130502.sites
var 13 1051.26 seconds ALL.wgs.phase3_shapeit2_mvncall_integrated_v5a.20130502.sites
var 12 1126.58 seconds ALL.wgs.phase3_shapeit2_mvncall_integrated_v5a.20130502.sites
"""
# ts.reader(1) reads the file named by the first command-line argument
for d in ts.reader(1):
    time = float(d['time'].split()[0])
    key = lookup[d['query']]
    data[key].append(time)
for k in data:
    # rows arrive highest process count first; reverse to ascending order
    data[k] = data[k][::-1]
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style('white')
from matplotlib import rcParams
rcParams['font.family'] = 'Arial'
rcParams['font.size'] = 18
# NOTE(review): Python 2 only — dict.values() is not indexable in Python 3.
# The value is immediately overridden with 16 anyway.
N = len(data.values()[0])
N = 16
markers = 'os'
for j, k in enumerate(data):
    values = data[k]
    # speed-up of i processes relative to the smallest process count
    plt.plot(range(1, N + 1), [values[0] / values[i] for i in range(N)],
             markers[j] + "-", label=k)
plt.ylabel("Speed-up relative to 1 process")
plt.xlabel("Number of processes")
# ideal linear-scaling reference line
plt.plot(range(1, N + 1), [i for i in range(1, N + 1)], '--',
         c="0.78", lw=2)
plt.tight_layout()
plt.legend(loc="upper left")
ax = plt.gca()
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
             ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(16)
for item in ax.get_legend().get_texts():
    item.set_fontsize(13)
plt.xlim(xmin=1, xmax=N + 0.16)
plt.ylim(ymin=1, ymax=8)
sns.despine(left=True, bottom=True)
plt.savefig('figure-4.pdf')
plt.show()
| mit |
icfaust/TRIPPy | TRIPPy/plot/pyplot.py | 1 | 5770 | import scipy
import scipy.special
import matplotlib.pyplot as plt
def plotTokamak(tokamak, pltobj=None, axis=True, pargs=None, **kwargs):
    """Plot the cross-section outline of a tokamak vessel.

    Plots (tokamak.sagi.s, tokamak.norm.s) — presumably (R, Z) wall
    coordinates, TODO confirm against the Tokamak class.  ``pltobj``
    defaults to the pyplot module; with ``axis`` True the view is forced
    to equal aspect and tight autoscaling.
    """
    if pltobj is None:
        pltobj = plt
    if pargs is None:
        pltobj.plot(tokamak.sagi.s, tokamak.norm.s, **kwargs)
    else:
        pltobj.plot(tokamak.sagi.s, tokamak.norm.s, pargs, **kwargs)
    if axis:
        # need to correct this!! (original note; aspect is applied to the
        # global gca() even when a custom pltobj was supplied)
        plt.gca().set_aspect('equal')
        pltobj.autoscale(tight=True)
def plotLine(line, invessel=True, ds=2.5e-3, pargs=None, pltobj=None, **kwargs):
    """Plot a ray (or an iterable of rays) in (R, Z) space.

    The ray is sampled every ``ds`` along its path; with ``invessel`` only
    the final segment (between the last two path nodes) is drawn.  When
    ``line`` is not a single ray, the AttributeError from attribute access
    triggers recursion over its elements.
    NOTE(review): ``ds`` is not forwarded in the recursive call, so nested
    rays always use the default spacing — confirm this is intended.
    """
    try:
        if invessel:
            temp = line(scipy.mgrid[line.norm.s[-2]:line.norm.s[-1]:ds])
        else:
            temp = line(scipy.mgrid[line.norm.s[0]:line.norm.s[-1]:ds])
        if pltobj is None:
            pltobj = plt
        if not pargs is None:
            pltobj.plot(temp.r0(), temp.x2(), pargs, **kwargs)
        else:
            pltobj.plot(temp.r0(), temp.x2(), **kwargs)
    except AttributeError:
        # not a single ray: assume an iterable of rays and plot each one
        for i in line:
            plotLine(i, invessel=invessel, pargs=pargs, pltobj=pltobj, **kwargs)
def sinogramLine(beam, r, z, invessel=True, ds=2.5e-3, pargs=None, pltobj=None, **kwargs):
    """Plot a beam (or iterable of beams) in sinogram coordinates about (r, z).

    Samples the beam every ``ds`` and converts the samples with t0()/t2();
    None breaks are inserted where the angle jumps by more than pi so
    matplotlib does not connect across the branch cut.
    NOTE(review): ``ds`` is not forwarded in the recursive call; also the
    boolean mask from the pairwise difference is one element shorter than
    the arange it indexes — older NumPy tolerated this, newer raises.
    """
    try:
        if invessel:
            temp = beam(scipy.mgrid[beam.norm.s[-2]:beam.norm.s[-1]:ds])
        else:
            temp = beam(scipy.mgrid[beam.norm.s[0]:beam.norm.s[-1]:ds])
        # deal with branch cut
        temp0 = temp.t0(r, z)
        temp2 = temp.t2(r, z)
        temp = scipy.arange(temp0.size)[abs(temp2[1:] - temp2[:-1]) > scipy.pi]
        print(temp)
        if len(temp) > 0:
            # break the polyline at each jump larger than pi
            temp0 = scipy.insert(temp0, temp + 1, None)
            temp2 = scipy.insert(temp2, temp + 1, None)
        if pltobj is None:
            pltobj = plt
        if not pargs is None:
            pltobj.plot(temp2, temp0, pargs, **kwargs)
        else:
            pltobj.plot(temp2, temp0, **kwargs)
    except AttributeError:
        for i in beam:
            sinogramLine(i, r, z, invessel=invessel, pargs=pargs, pltobj=pltobj, **kwargs)
def image(r, z, out, pltobj=None, **kwargs):
    """Render ``out`` as an image over the extents of the (r, z) grid."""
    target = plt if pltobj is None else pltobj
    extent = (r.min(), r.max(), z.min(), z.max())
    target.imshow(out.T, origin='lower', extent=extent, **kwargs)
def plotBF(data, r, z, rcent, zcent, rmax, l=range(15), mcos=[0], msin=[], **kwargs):
    """Reconstruct and image a Bessel-Fourier expansion on an (r, z) grid.

    data : flat coefficient vector, cosine terms first then sine terms, each
        group looping over the radial mode numbers in ``l``
    r, z : 1-D grid coordinate vectors; rcent, zcent : expansion center
    rmax : normalization radius; points outside it are zeroed
    mcos, msin : poloidal mode numbers (defaults are mutable but never
        modified here)
    Fix vs. original: ``xrange`` replaced by ``range`` (Python 3).
    NOTE(review): zeros is indexed as zeros[m, i], which assumes the unique
    poloidal numbers are exactly 0..len(u)-1 — confirm for sparse m sets.
    NOTE(review): ``rgrid > rmax`` compares the already-normalized radius to
    rmax; ``> 1`` may have been intended.
    """
    rgrid, zgrid = scipy.meshgrid(r, z)
    theta = scipy.arctan2(zgrid - zcent, rgrid - rcent)
    # normalized radius from the expansion center
    rgrid = scipy.sqrt((rgrid - rcent) ** 2 + (zgrid - zcent) ** 2) / rmax
    output = scipy.zeros(rgrid.shape)
    idx = 0
    u = scipy.unique(mcos + msin)
    zeros = scipy.zeros((len(u), len(l)))
    for i in range(len(u)):
        zeros[i] = scipy.special.jn_zeros(u[i], zeros.shape[1])
    for m in mcos:
        for i in l:
            output += data[idx] * scipy.special.jn(m, zeros[m, i] * rgrid) * scipy.cos(m * theta)
            idx += 1
    for m in msin:
        for i in l:
            output += data[idx] * scipy.special.jn(m, zeros[m, i] * rgrid) * scipy.sin(m * theta)
            idx += 1
    scipy.place(output, rgrid > rmax, 0)
    image(r, z, output.T, **kwargs)
def plotBFradial(data, l=range(15), mcos=[0], msin=[], err=None, **kwargs):
    """Plot the radial profile of each poloidal harmonic of a Bessel-Fourier fit.

    data : flat coefficient vector, cosine terms first then sine terms
    err : optional matching error coefficients, drawn as a shaded +/- band
    Fixes vs. original: ``xrange`` -> ``range`` (Python 3), an integer sample
    count for linspace (float counts raise TypeError in modern NumPy), and
    removal of the unused ``errorout`` local.
    """
    rgrid = scipy.linspace(0, 1, 100)  # was 1e2; the count must be an int
    output = scipy.zeros(rgrid.shape)
    idx = 0
    idxe = 0
    u = scipy.unique(mcos + msin)
    zeros = scipy.zeros((len(u), len(l)))
    for i in range(len(u)):
        zeros[i] = scipy.special.jn_zeros(u[i], zeros.shape[1])
    for m in mcos:
        output = scipy.zeros(rgrid.shape)
        for i in l:
            output += data[idx] * scipy.special.jn(m, zeros[m, i] * rgrid)
            idx += 1
        if m > 1:
            labelin = r'$\cos$' + str(m) + r'$\theta$'
        elif m == 1:
            labelin = r'$\cos \theta$'
        else:
            labelin = r'radial'
        plt.plot(rgrid,
                 output,
                 label=labelin,
                 **kwargs)
        if not err is None:
            outpute = scipy.zeros(rgrid.shape)
            for i in l:
                outpute += err[idxe] * scipy.special.jn(m, zeros[m, i] * rgrid)
                idxe += 1
            plt.fill_between(rgrid, output - outpute, output + outpute, color='k', alpha=.3)
    for m in msin:
        output = scipy.zeros(rgrid.shape)
        for i in l:
            output += data[idx] * scipy.special.jn(m, zeros[m, i] * rgrid)
            idx += 1
        if m > 1:
            labelin = r'$\sin$' + str(m) + r'$\theta$'
        else:
            labelin = r'$\sin \theta$'
        plt.plot(rgrid,
                 output,
                 label=labelin,
                 **kwargs)
        if not err is None:
            outpute = scipy.zeros(rgrid.shape)
            for i in l:
                outpute += err[idxe] * scipy.special.jn(m, zeros[m, i] * rgrid)
                idxe += 1
            plt.fill_between(rgrid, output - outpute, output + outpute, color='k', alpha=.3)
def plotBFbright(sens, bright, prof, beams):
    """Compare model chord brightnesses against a reconstruction.

    Scales sens.prof by each beam's etendue/(4*pi) to chord currents and
    plots them next to the model values in ``bright``.  Axis limits, labels
    and the (1)/(2)/(3) annotations reproduce the original figure.
    Fix vs. original: ``xrange`` replaced by ``range`` (Python 3).
    """
    temp = scipy.dot(sens, prof).T[0:len(bright)]
    for i in range(len(temp)):
        temp[i] *= beams[i].etendue / (4 * scipy.pi)
    plt.plot(scipy.arange(len(temp)), temp, marker='s', color='k', linestyle=' ', label='reconstruction')
    plt.plot(scipy.arange(len(bright)), bright, marker='$\circ$', linestyle=' ', label='model')
    plt.ylabel(r'Current [$\mu$A]')
    plt.xlabel(r'Chord Number')
    plt.ylim((0, 8))
    plt.title(r'Chord Signals')
    plt.text(8.5, 1, r'(1)', size=20)
    plt.text(28.5, 1, r'(2)', size=20)
    plt.text(48.5, 1, r'(3)', size=20)
    plt.gca().axes.get_xaxis().set_ticks_position('bottom')
    plt.gca().axes.get_yaxis().set_ticks_position('left')
    plt.gca().legend(loc='upper right', numpoints=1)
def test():
    """Placeholder smoke-test hook; merely reports that nothing is exercised."""
    message = 'nothing'
    print(message)
| mit |
stupid-coder/lwan | tools/benchmark.py | 6 | 4379 | #!/usr/bin/python
import sys
import json
import commands
import time
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
def clearstderrline():
    """Erase the current terminal line on stderr (ANSI 'erase entire line')."""
    erase_line = '\033[2K'
    sys.stderr.write(erase_line)
def weighttp(url, n_threads, n_connections, n_requests, keep_alive):
    """Run one weighttp benchmark against ``url`` and return its parsed JSON.

    Python 2 only: relies on the removed ``commands`` module (use subprocess
    on Python 3).  The command being run is echoed on stderr; weighttp's own
    stderr is discarded via the shell redirect.
    """
    keep_alive = '-k' if keep_alive else ''
    command = 'weighttp %(keep_alive)s ' \
              '-t %(n_threads)d ' \
              '-c %(n_connections)d ' \
              '-n %(n_requests)d ' \
              '-j ' \
              '%(url)s 2> /dev/null' % locals()
    clearstderrline()
    sys.stderr.write('*** %s\r' % command)
    output = commands.getoutput(command)
    return json.loads(output)
def weighttp_has_json_output():
    """Return True when the installed weighttp understands the -j (JSON) flag."""
    output = commands.getoutput('weighttp -j')
    return not 'unknown option: -j' in output
def steprange(initial, final, steps=10):
    """Yield evenly spaced values from ``initial`` up to ``final`` inclusive."""
    increment = (final - initial) / steps
    current = initial
    while current <= final:
        yield current
        current += increment
def sleepwithstatus(msg, period):
    """Sleep for ``period`` seconds while showing ``msg`` with a spinner on stderr."""
    slept = 0
    spinner = 0
    while slept <= period:
        clearstderrline()
        # cycle through the four spinner glyphs, one step per 0.1 s
        sys.stderr.write('\r%s: %s' % (msg, '/|\\-'[spinner % 4]))
        time.sleep(0.1)
        slept += 0.1
        spinner += 1
    sys.stderr.write('\r')
    clearstderrline()
def cmdlineboolarg(arg):
    """Consume flag ``arg`` from sys.argv; return True iff it was present."""
    present = arg in sys.argv
    if present:
        sys.argv.remove(arg)
    return present
def cmdlineintarg(arg, default=0):
    """Consume ``arg <value>`` from sys.argv; return the int value or ``default``.

    Python 2 only (print statement).  Both the flag and its value are removed
    from sys.argv; a non-integer value falls back to ``default`` but is still
    consumed because the ``finally`` clause runs in both cases.
    """
    value = default
    if arg in sys.argv:
        index = sys.argv.index(arg)
        del sys.argv[index]  # drop the flag; its value now sits at ``index``
        try:
            value = int(sys.argv[index])
        except ValueError:
            print 'Argument is of invalid type for argument %s, assuming default (%d)' % (arg, default)
        finally:
            del sys.argv[index]  # always drop the value token too
    return value
class CSVOutput:
    """Streams benchmark samples as CSV rows on stdout (Python 2 print syntax)."""
    def header(self):
        # column names, written once before the first sample
        print 'keep_alive,n_connections,rps,kbps,2xx,3xx,4xx,5xx'
    def footer(self):
        clearstderrline()
    def log(self, keep_alive, n_connections, rps, kbps, _2xx, _3xx, _4xx, _5xx):
        clearstderrline()
        print ','.join(str(token) for token in
                       (int(keep_alive), n_connections, rps, kbps, _2xx, _3xx, _4xx, _5xx))
class MatplotlibOutput:
    """Collects benchmark samples and plots requests/s versus connection count.

    With ``xkcd`` True the final figure is drawn in matplotlib's xkcd style.
    """
    def __init__(self, xkcd=False):
        self.xkcd = xkcd
    def header(self):
        # reset the per-run sample buffers
        self.n_connections = []
        self.rps = {'keep-alive': [], 'close': []}
    def _plot(self):
        plt.xlabel('# connections')
        plt.ylabel('Requests/s')
        # n_connections holds both phases' x values; truncate to the
        # (possibly shorter) close-connection series
        n_connections = self.n_connections[:len(self.rps['close'])]
        plt.plot(n_connections, self.rps['keep-alive'], label='Keep-Alive')
        plt.plot(n_connections, self.rps['close'], label='Close',
                 marker='o', linestyle='--', color='r')
        plt.title('Web Server Benchmark')
        plt.legend()
        plt.show()
    def footer(self):
        if self.xkcd:
            with plt.xkcd():
                self._plot()
        else:
            self._plot()
    def log(self, keep_alive, n_connections, rps, kbps, _2xx, _3xx, _4xx, _5xx):
        self.n_connections.append(n_connections)
        if keep_alive:
            self.rps['keep-alive'].append(rps)
        else:
            self.rps['close'].append(rps)
if __name__ == '__main__':
    # require the JSON-capable weighttp fork before doing anything else
    if not weighttp_has_json_output():
        print 'This script requires a special version of weighttp which supports JSON'
        print 'output. Get it at http://github.com/lpereira/weighttp'
        sys.exit(1)
    plot = cmdlineboolarg('--plot')
    xkcd = cmdlineboolarg('--xkcd')
    n_threads = cmdlineintarg('--threads', 2)
    n_requests = cmdlineintarg('--request', 1000000)
    keep_alive_timeout = cmdlineintarg('--keep-alive-timeout', 5)
    n_conn_start = cmdlineintarg('--start-conn', 100)
    n_conn_end = cmdlineintarg('--end-conn', 60000)
    n_conn_step = cmdlineintarg('--conn-step', 10)
    # last remaining argv entry is the target URL, if any
    url = sys.argv[-1] if len(sys.argv) > 1 else 'http://localhost:8080/100.html'
    # fall back to CSV output when matplotlib is missing or plotting was not requested
    if plt is None:
        if plot:
            print 'Matplotlib not installed!'
            sys.exit(1)
        output = CSVOutput()
    elif plot:
        output = MatplotlibOutput(xkcd)
    else:
        output = CSVOutput()
    output.header()
    for keep_alive in (True, False):
        for n_connections in steprange(n_conn_start, n_conn_end, n_conn_step):
            results = weighttp(url, n_threads, n_connections, n_requests, keep_alive)
            status = results['status_codes']
            output.log(keep_alive, n_connections, results['reqs_per_sec'],
                       results['kbyte_per_sec'], status['2xx'], status['3xx'],
                       status['4xx'], status['5xx'])
        # let the server's keep-alive connections time out between phases
        sleepwithstatus('Waiting for keepalive connection timeout', keep_alive_timeout * 1.1)
    output.footer()
| gpl-2.0 |
jjbrin/trading-with-python | lib/functions.py | 76 | 11627 | # -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
    ''' Return a new array of the given shape and dtype, filled with NaN. '''
    return np.full(shape, np.nan, dtype=dtype)
def plotCorrelationMatrix(price, thresh=None):
    ''' plot a correlation matrix of price returns as a heatmap image
    inputs:
        price: prices DataFrame (one column per symbol)
        thresh: correlation threshold to use for checking, default None;
            when given, the boolean matrix (correlation > thresh) is shown
    returns the (possibly thresholded) correlation DataFrame
    '''
    symbols = price.columns.tolist()
    R = price.pct_change()
    correlationMatrix = R.corr()
    if thresh is not None:
        correlationMatrix = correlationMatrix > thresh
    plt.imshow(abs(correlationMatrix.values), interpolation='none')
    plt.xticks(range(len(symbols)), symbols)
    plt.yticks(range(len(symbols)), symbols)
    plt.colorbar()
    plt.title('Correlation matrix')
    return correlationMatrix
def pca(A):
    """ performs principal components analysis
    (PCA) on the n-by-p DataFrame A
    Rows of A correspond to observations, columns to variables.
    Returns :
    coeff : principal components, column-wise
    transform: A in principal component space
    latent : eigenvalues
    """
    # computing eigenvalues and eigenvectors of covariance matrix
    M = (A - A.mean()).T  # subtract the mean (along columns)
    [latent, coeff] = np.linalg.eig(np.cov(M))  # attention: not always sorted
    idx = np.argsort(latent)  # sort eigenvalues
    idx = idx[::-1]  # in descending order (largest eigenvalue first)
    coeff = coeff[:, idx]
    latent = latent[idx]
    score = np.dot(coeff.T, A.T)  # projection of the data in the new space
    transform = DataFrame(index=A.index, data=score.T)
    return coeff, transform, latent
def pos2pnl(price, position, ibTransactionCost=False):
    """
    calculate pnl based on price and position
    Inputs:
    ---------
    price: series or dataframe of price
    position: number of shares at each time. Column names must be same as in price
    ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
    Returns a portfolio DataFrame with 'cash', 'stock', 'tc' and 'total' columns
    """
    delta = position.diff()  # shares traded at each step
    port = DataFrame(index=price.index)
    if isinstance(price, Series):  # no need to sum along 1 for series
        port['cash'] = (-delta * price).cumsum()
        port['stock'] = (position * price)
    else:  # dealing with DataFrame here
        port['cash'] = (-delta * price).sum(axis=1).cumsum()
        port['stock'] = (position * price).sum(axis=1)
    if ibTransactionCost:
        tc = -0.005 * position.diff().abs()  # basic transaction cost
        tc[(tc > -1) & (tc < 0)] = -1  # everything under 1$ will be ceil'd to 1$
        if isinstance(price, DataFrame):
            tc = tc.sum(axis=1)
        port['tc'] = tc.cumsum()
    else:
        port['tc'] = 0.
    port['total'] = port['stock'] + port['cash'] + port['tc']
    return port
def tradeBracket(price, entryBar, maxTradeLength, bracket):
    '''
    Trade a symmetrical price bracket on a series.

    Input
    ------
    price : series of price values
    entryBar : entry bar number
    maxTradeLength : max trade duration in bars
    bracket : allowed price deviation

    Returns (priceDelta, exitBar): the move relative to the entry price and
    the bar at which the bracket was breached, or the time-based exit when
    price stayed inside the bracket for the whole window.
    '''
    finalBar = min(entryBar + maxTradeLength, len(price) - 1)
    deltas = price[entryBar:finalBar] - price[entryBar]
    # indices where the price escapes the bracket
    breached = np.nonzero(abs(deltas) > bracket)
    if breached[0].any():  # found a breach (note: a breach at offset 0 is ignored)
        firstHit = breached[0][0]
        return deltas[firstHit], firstHit + entryBar
    # never left the bracket: exit on time
    return deltas[-1], finalBar
def estimateBeta(priceY, priceX, algo='standard'):
    '''
    estimate stock Y vs stock X beta using iterative linear
    regression. Outliers outside 3 sigma boundary are filtered out

    Parameters
    --------
    priceX : price series of x (usually market)
    priceY : price series of y (estimate beta of this price)
    algo : 'returns' (iterative outlier-filtered fit on simple returns),
           'log' (fit on log prices) or 'standard' (cov/var of log returns)

    Returns
    --------
    beta : stockY beta relative to stock X
    '''
    X = DataFrame({'x': priceX, 'y': priceY})
    if algo == 'returns':
        ret = (X / X.shift(1) - 1).dropna().values
        x = ret[:, 0]
        y = ret[:, 1]
        # filter high values: keep only the central 60% of market returns
        low = np.percentile(x, 20)
        high = np.percentile(x, 80)
        iValid = (x > low) & (x < high)
        x = x[iValid]
        y = y[iValid]
        iteration = 1
        nrOutliers = 1
        # refit until no 3-sigma residual outliers remain (at most 10 passes)
        while iteration < 10 and nrOutliers > 0:
            (a, b) = polyfit(x, y, 1)
            yf = polyval([a, b], x)
            err = yf - y
            idxOutlier = abs(err) > 3 * np.std(err)
            nrOutliers = sum(idxOutlier)
            beta = a
            x = x[~idxOutlier]
            y = y[~idxOutlier]
            iteration += 1
    elif algo == 'log':
        x = np.log(X['x'])
        y = np.log(X['y'])
        (a, b) = polyfit(x, y, 1)
        beta = a
    elif algo == 'standard':
        ret = np.log(X).diff().dropna()
        beta = ret['x'].cov(ret['y']) / ret['x'].var()
    else:
        raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
    return beta
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
    """
    Annualized volatility estimate (in percent) from OHLC data.

    Parameters
    ----------
    ohlc : DataFrame with 'open', 'high', 'low', 'close' columns
    N : rolling window length in bars
    algo : 'YangZhang' (open/high/low/close estimator) or 'CC'
        (simple close-to-close estimator)

    Returns a Series scaled by sqrt(252)*100; the first N-1 values are NaN.
    Raises ValueError for an unknown ``algo``.
    Fix vs. original: ``pd.rolling_sum`` was removed from pandas; the
    equivalent ``Series.rolling(N).sum()`` is used instead.
    """
    cc = np.log(ohlc.close / ohlc.close.shift(1))
    if algo == 'YangZhang':  # Yang-Zhang volatility
        ho = np.log(ohlc.high / ohlc.open)
        lo = np.log(ohlc.low / ohlc.open)
        co = np.log(ohlc.close / ohlc.open)
        oc = np.log(ohlc.open / ohlc.close.shift(1))
        oc_sq = oc ** 2
        cc_sq = cc ** 2
        rs = ho * (ho - co) + lo * (lo - co)  # Rogers-Satchell term
        close_vol = cc_sq.rolling(window=N).sum() * (1.0 / (N - 1.0))
        open_vol = oc_sq.rolling(window=N).sum() * (1.0 / (N - 1.0))
        window_rs = rs.rolling(window=N).sum() * (1.0 / (N - 1.0))
        result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
        # first N-1 windows are incomplete; use positional indexing explicitly
        result.iloc[:N - 1] = np.nan
    elif algo == 'CC':  # standard close-close estimator
        result = np.sqrt(252) * np.sqrt(((cc ** 2).rolling(N).sum()) / N)
    else:
        raise ValueError('Unknown algo type.')
    return result * 100
def rank(current, past):
    ''' Relative rank 0..1: fraction of ``past`` strictly below ``current``. '''
    below = (current > past).sum()
    return below / float(past.count())
def returns(df):
    """Simple (arithmetic) returns: df_t / df_{t-1} - 1."""
    previous = df.shift(1)
    return df / previous - 1
def logReturns(df):
    """Logarithmic returns: log(df_t) - log(df_{t-1})."""
    logged = np.log(df)
    return logged.diff()
def dateTimeToDate(idx):
    ''' Convert an iterable of datetimes to a list of plain dates. '''
    return [stamp.date() for stamp in idx]
def readBiggerScreener(fName):
    ''' import data from Bigger Capital screener

    Returns a DataFrame with one column per CSV header field, in file order;
    numeric-looking cells become floats, everything else stays a string.
    Fix vs. original: the csv module needs text mode with newline='' on
    Python 3 ('rb' worked on Python 2 only).
    '''
    with open(fName, newline='') as f:
        rows = list(csv.reader(f))
    header = rows[0]
    data = [[] for _ in range(len(header))]
    for row in rows[1:]:
        for i, elm in enumerate(row):
            try:
                data[i].append(float(elm))
            except ValueError:
                # non-numeric cell: keep the raw string
                data[i].append(str(elm))
    # reindex by row number and restore the original column order
    return DataFrame(dict(zip(header, data)), index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
    """Annualized Sharpe ratio of a daily pnl series (250 trading days/year)."""
    annualization = np.sqrt(250)
    return annualization * pnl.mean() / pnl.std()
def drawdown(s):
    """
    Compute running drawdown and drawdown duration of a price/pnl curve.

    Input:
        s : price or cumulative pnl curve ($), np.ndarray or pd.Series
    Returns:
        (drawdown, duration): vectors of drawdown values and durations,
        returned as pd.Series when the input was a pd.Series.
    """
    # work on a plain array: ~10x faster than Series element access
    wasSeries = isinstance(s, pd.Series)
    if wasSeries:
        labels = s.index
        s = s.values
    if s.min() < 0:  # shift the curve so its minimum sits at zero
        s = s - s.min()
    n = len(s)
    peak = np.zeros(n)
    dd = np.zeros(n)
    duration = np.zeros(n)
    # note: peak[0] stays 0, so a positive s[0] is ignored by construction
    for t in range(1, n):
        peak[t] = max(peak[t - 1], s[t])
        dd[t] = peak[t] - s[t]
        duration[t] = 0 if dd[t] == 0 else duration[t - 1] + 1
    if wasSeries:
        return pd.Series(index=labels, data=dd), pd.Series(index=labels, data=duration)
    return dd, duration
def profitRatio(pnl):
    '''
    calculate profit ratio as sum(pnl)/drawdown
    Input: pnl - daily pnl, Series or DataFrame
    Returns a scalar for a Series input, a per-column Series for a DataFrame.
    Raises TypeError for any other input type.
    '''
    def processVector(pnl):  # process a single column
        s = pnl.fillna(0)
        dd = drawdown(s)[0]  # only the drawdown values, not the durations
        p = s.sum() / dd.max()
        return p
    if isinstance(pnl, Series):
        return processVector(pnl)
    elif isinstance(pnl, DataFrame):
        p = Series(index=pnl.columns)
        for col in pnl.columns:
            p[col] = processVector(pnl[col])
        return p
    else:
        raise TypeError("Input must be DataFrame or Series, not " + str(type(pnl)))
def candlestick(df, width=0.5, colorup='b', colordown='r'):
    ''' plot a candlestick chart of a dataframe

    df needs 'open', 'high', 'low' and 'close' columns; up bars are drawn
    in ``colorup``, down (or flat) bars in ``colordown``.
    '''
    O = df['open'].values
    H = df['high'].values
    L = df['low'].values
    C = df['close'].values
    fig = plt.gcf()
    ax = plt.axes()
    #ax.hold(True)
    X = df.index
    # plot high and low as thin wicks
    ax.bar(X, height=H - L, bottom=L, width=0.1, color='k')
    idxUp = C > O
    ax.bar(X[idxUp], height=(C - O)[idxUp], bottom=O[idxUp], width=width, color=colorup)
    idxDown = C <= O
    ax.bar(X[idxDown], height=(O - C)[idxDown], bottom=C[idxDown], width=width, color=colordown)
    try:
        # only works for date-like indices; best-effort otherwise
        fig.autofmt_xdate()
    except Exception:  # pragma: no cover
        pass
    ax.grid(True)
    #ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
    ''' Convert a Python datetime to a MATLAB serial date number. '''
    # MATLAB counts days from year 0 while Python ordinals start at year 1,
    # hence the 366-day offset.
    shifted = t + dt.timedelta(days=366)
    midnight = dt.datetime(t.year, t.month, t.day, 0, 0, 0)
    day_fraction = (t - midnight).seconds / (24.0 * 60.0 * 60.0)
    return shifted.toordinal() + day_fraction
def getDataSources(fName=None):
    ''' return data sources directories for this machine.
    directories are defined in datasources.ini or provided filepath

    Python 2 only: imports from ConfigParser (renamed configparser in
    Python 3).  Raises NameError when the current hostname has no section
    in the ini file.
    '''
    import socket
    from ConfigParser import ConfigParser
    pcName = socket.gethostname()
    p = ConfigParser()
    p.optionxform = str  # keep option names case-sensitive
    if fName is None:
        fName = 'datasources.ini'
    p.read(fName)
    if pcName not in p.sections():
        raise NameError('Host name section %s not found in file %s' % (pcName, fName))
    dataSources = {}
    for option in p.options(pcName):
        dataSources[option] = p.get(pcName, option)
    return dataSources
if __name__ == '__main__':
    # smoke test: draw a tiny three-bar candlestick chart
    df = DataFrame({'open': [1, 2, 3], 'high': [5, 6, 7], 'low': [-2, -1, 0], 'close': [2, 1, 4]})
    plt.clf()
    candlestick(df)
DonBeo/scikit-learn | sklearn/metrics/__init__.py | 10 | 3328 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
# Public API of sklearn.metrics.
# Fix: the list previously contained 'pairwise_distances_argmin_min' twice;
# duplicates in __all__ are harmless at import time but wrong as API
# documentation, so the repeated entry was removed.
__all__ = [
    'accuracy_score',
    'adjusted_mutual_info_score',
    'adjusted_rand_score',
    'auc',
    'average_precision_score',
    'classification_report',
    'cluster',
    'completeness_score',
    'confusion_matrix',
    'consensus_score',
    'coverage_error',
    'euclidean_distances',
    'explained_variance_score',
    'f1_score',
    'fbeta_score',
    'get_scorer',
    'hamming_loss',
    'hinge_loss',
    'homogeneity_completeness_v_measure',
    'homogeneity_score',
    'jaccard_similarity_score',
    'label_ranking_average_precision_score',
    'log_loss',
    'make_scorer',
    'matthews_corrcoef',
    'mean_absolute_error',
    'mean_squared_error',
    'median_absolute_error',
    'mutual_info_score',
    'normalized_mutual_info_score',
    'pairwise_distances',
    'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
    'pairwise_kernels',
    'precision_recall_curve',
    'precision_recall_fscore_support',
    'precision_score',
    'r2_score',
    'recall_score',
    'roc_auc_score',
    'roc_curve',
    'SCORERS',
    'silhouette_samples',
    'silhouette_score',
    'v_measure_score',
    'zero_one_loss',
    'brier_score_loss',
]
| bsd-3-clause |
yask123/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
    """Check that a fitted clf's predict_proba/predict_log_proba are coherent:
    correct shapes, rows summing to one, and log_proba == log(proba)."""
    proba = clf.predict_proba(X)
    # We know that we can have division by zero
    log_proba = clf.predict_log_proba(X)
    y = np.atleast_1d(y)
    if y.ndim == 1:
        y = np.reshape(y, (-1, 1))
    n_outputs = y.shape[1]
    n_samples = len(X)
    if n_outputs == 1:
        # normalize to the multi-output representation (list of arrays)
        proba = [proba]
        log_proba = [log_proba]
    for k in range(n_outputs):
        assert_equal(proba[k].shape[0], n_samples)
        assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
        assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
        # We know that we can have division by zero
        assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
    """Check that predictions preserve the shape of y for 1-d and 2-d targets."""
    # 1d case
    X = np.array([[0], [0], [0], [0]])  # ignored
    y = np.array([1, 2, 1, 1])
    est = clone(clf)
    est.fit(X, y)
    y_pred = est.predict(X)
    assert_equal(y.shape, y_pred.shape)
    # 2d case
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 0],
                  [1, 3]])
    est = clone(clf)
    est.fit(X, y)
    y_pred = est.predict(X)
    assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
    """Check shape preservation for a wide 2-d target (constant strategy)."""
    # 2d case only
    X = np.array([[0], [0], [0], [0]])  # ignored
    y = np.array([[1, 0, 5, 4, 3],
                  [2, 0, 1, 2, 5],
                  [1, 0, 4, 5, 2],
                  [1, 3, 3, 2, 0]])
    est = clone(clf)
    est.fit(X, y)
    y_pred = est.predict(X)
    assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
                              y_test, y_pred_test):
    """Check that every train/test prediction equals the constant ``statistic`` row."""
    assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
                       y_pred_learn)
    assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
                       y_pred_test)
def test_most_frequent_and_prior_strategy():
    """most_frequent/prior dummies always predict the majority class."""
    X = [[0], [0], [0], [0]]  # ignored
    y = [1, 2, 1, 1]
    for strategy in ("most_frequent", "prior"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), np.ones(len(X)))
        _check_predict_proba(clf, X, y)
        if strategy == "prior":
            # 'prior' exposes the empirical class distribution...
            assert_array_equal(clf.predict_proba([X[0]]),
                               clf.class_prior_.reshape((1, -1)))
        else:
            # ...while 'most_frequent' yields a one-hot distribution
            assert_array_equal(clf.predict_proba([X[0]]),
                               clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
    """Majority-class strategies predict per-output majorities for 2-d y."""
    X = [[0], [0], [0], [0]]  # ignored
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 0],
                  [1, 3]])
    n_samples = len(X)
    for strategy in ("prior", "most_frequent"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        # majorities are 1 for the first output and 0 for the second
        assert_array_equal(clf.predict(X),
                           np.hstack([np.ones((n_samples, 1)),
                                      np.zeros((n_samples, 1))]))
        _check_predict_proba(clf, X, y)
        _check_behavior_2d(clf)
def test_stratified_strategy():
    """'stratified' predictions follow the empirical class frequencies."""
    X = [[0]] * 5  # ignored
    y = [1, 2, 1, 1, 2]
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    p = np.bincount(y_pred) / float(len(X))
    # class frequencies were 3/5 and 2/5; check to one decimal
    assert_almost_equal(p[1], 3. / 5, decimal=1)
    assert_almost_equal(p[2], 2. / 5, decimal=1)
    _check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
    """'stratified' matches per-output class frequencies for 2-d y."""
    X = [[0]] * 5  # ignored
    y = np.array([[2, 1],
                  [2, 2],
                  [1, 1],
                  [1, 2],
                  [1, 1]])
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        # each output has classes in 3/5 vs 2/5 proportion
        assert_almost_equal(p[1], 3. / 5, decimal=1)
        assert_almost_equal(p[2], 2. / 5, decimal=1)
        _check_predict_proba(clf, X, y)
    _check_behavior_2d(clf)
def test_uniform_strategy():
    """'uniform' predicts every class with equal probability."""
    X = [[0]] * 4  # ignored
    y = [1, 2, 1, 1]
    clf = DummyClassifier(strategy="uniform", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    p = np.bincount(y_pred) / float(len(X))
    # both classes should appear about half the time regardless of frequency
    assert_almost_equal(p[1], 0.5, decimal=1)
    assert_almost_equal(p[2], 0.5, decimal=1)
    _check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
    """'uniform' predicts every class equally often for each output of 2-d y."""
    X = [[0]] * 4  # ignored
    y = np.array([[2, 1],
                  [2, 2],
                  [1, 2],
                  [1, 1]])
    clf = DummyClassifier(strategy="uniform", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 0.5, decimal=1)
        assert_almost_equal(p[2], 0.5, decimal=1)
        _check_predict_proba(clf, X, y)
    _check_behavior_2d(clf)
def test_string_labels():
    """String class labels are supported and returned as-is."""
    X = [[0]] * 5
    y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
    clf = DummyClassifier(strategy="most_frequent")
    clf.fit(X, y)
    assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
    """An unknown strategy raises on fit; predicting before fit raises too."""
    clf = DummyClassifier(strategy="unknown")
    assert_raises(ValueError, clf.fit, [], [])
    assert_raises(ValueError, clf.predict, [])
    assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
    """The default regressor strategy predicts the training mean."""
    random_state = np.random.RandomState(seed=1)
    X = [[0]] * 4  # ignored
    y = random_state.randn(4)
    reg = DummyRegressor()
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
    """Mean strategy predicts the column-wise training mean for 2-d y."""
    random_state = np.random.RandomState(seed=1)
    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)
    mean = np.mean(y_learn, axis=0).reshape((1, -1))
    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)
    # Correctness oracle
    est = DummyRegressor()
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
def test_regressor_exceptions():
    """Predicting with an unfitted regressor raises ValueError."""
    reg = DummyRegressor()
    assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
    """strategy='median' predicts the training-target median."""
    rng = np.random.RandomState(seed=1)
    n_samples = 5
    X = [[0]] * n_samples  # features are ignored by the dummy estimator
    y = rng.randn(n_samples)
    reg = DummyRegressor(strategy="median").fit(X, y)
    assert_array_equal(reg.predict(X), [np.median(y)] * n_samples)
def test_median_strategy_multioutput_regressor():
    """Multioutput median strategy: every row equals the per-column median."""
    rng = np.random.RandomState(seed=1)
    # NOTE: draws must stay in this order so the RNG stream is unchanged.
    X_learn = rng.randn(10, 10)
    y_learn = rng.randn(10, 5)
    column_medians = np.median(y_learn, axis=0).reshape((1, -1))
    X_test = rng.randn(20, 10)
    y_test = rng.randn(20, 5)
    # Correctness oracle
    est = DummyRegressor(strategy="median").fit(X_learn, y_learn)
    _check_equality_regressor(
        column_medians, y_learn, est.predict(X_learn),
        y_test, est.predict(X_test))
    _check_behavior_2d(est)
def test_quantile_strategy_regressor():
    """quantile strategy: constant prediction at the requested quantile."""
    rng = np.random.RandomState(seed=1)
    n_samples = 5
    X = [[0]] * n_samples  # ignored
    y = rng.randn(n_samples)
    # (quantile, expected constant) pairs, checked in the original order.
    cases = [(0.5, np.median(y)),
             (0, np.min(y)),
             (1, np.max(y)),
             (0.3, np.percentile(y, q=30))]
    for quantile, expected in cases:
        reg = DummyRegressor(strategy="quantile", quantile=quantile)
        reg.fit(X, y)
        assert_array_equal(reg.predict(X), [expected] * n_samples)
def test_quantile_strategy_multioutput_regressor():
    """quantile strategy on 2d targets: per-column median (q=0.5) and
    80th percentile (q=0.8) must be predicted for every row."""
    random_state = np.random.RandomState(seed=1)
    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)
    median = np.median(y_learn, axis=0).reshape((1, -1))
    quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)
    # Correctness oracle
    est = DummyRegressor(strategy="quantile", quantile=0.5)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(
        median, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
    # Correctness oracle
    est = DummyRegressor(strategy="quantile", quantile=0.8)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(
        quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
def test_quantile_invalid():
    """Invalid `quantile` values must be rejected at fit time."""
    X = [[0]] * 5  # ignored
    y = [0] * 5  # ignored
    # Quantile left unspecified -> ValueError.
    est = DummyRegressor(strategy="quantile")
    assert_raises(ValueError, est.fit, X, y)
    # Explicit None / wrong container / out-of-range -> ValueError.
    for bad_quantile in (None, [0], -0.1, 1.1):
        est = DummyRegressor(strategy="quantile", quantile=bad_quantile)
        assert_raises(ValueError, est.fit, X, y)
    # A non-numeric quantile raises TypeError instead.
    est = DummyRegressor(strategy="quantile", quantile='abc')
    assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
    """Fitting the quantile strategy on empty data must raise."""
    assert_raises(ValueError,
                  DummyRegressor(strategy="quantile", quantile=0.4).fit,
                  [], [])
def test_constant_strategy_regressor():
    """constant strategy: scalar and 1-element list behave the same."""
    rng = np.random.RandomState(seed=1)
    n_samples = 5
    X = [[0]] * n_samples  # ignored
    y = rng.randn(n_samples)
    for constant in ([43], 43):
        reg = DummyRegressor(strategy="constant", constant=constant)
        reg.fit(X, y)
        assert_array_equal(reg.predict(X), [43] * n_samples)
def test_constant_strategy_multioutput_regressor():
    """constant strategy with a vector constant: each output column is
    filled with the corresponding constant value."""
    random_state = np.random.RandomState(seed=1)
    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)
    # test with 2d array
    constants = random_state.randn(5)
    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)
    # Correctness oracle
    est = DummyRegressor(strategy="constant", constant=constants)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(
        constants, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
    """After fitting strategy='mean', constant_ holds the target mean."""
    y = [1, 2, 4, 6, 8]
    X = [[0]] * len(y)
    # when strategy = 'mean'
    est = DummyRegressor(strategy='mean').fit(X, y)
    assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
    """An unrecognised strategy string must raise ValueError at fit."""
    # NOTE(review): "strategey" is a typo for "strategy" in this test's
    # name; kept as-is to avoid changing the public test identifier.
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]
    est = DummyRegressor(strategy='gona')
    assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
    """strategy='constant' without a constant value raises TypeError."""
    y = [1, 2, 4, 6, 8]
    X = [[0]] * len(y)
    est = DummyRegressor(strategy='constant')
    assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
    """The constant vector length must match the number of outputs."""
    rng = np.random.RandomState(seed=1)
    X = rng.randn(10, 10)
    y = rng.randn(10, 5)
    # 4 constants for a 5-output target -> ValueError.
    est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
    assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
    """constant strategy predicts the supplied label, numeric or string."""
    cases = [
        ([2, 1, 2, 2], 1, np.ones(4)),
        (['two', 'one', 'two', 'two'], 'one', np.array(['one'] * 4)),
    ]
    for y, constant, expected in cases:
        X = [[0], [0], [0], [0]]  # ignored
        clf = DummyClassifier(strategy="constant", random_state=0,
                              constant=constant)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), expected)
        _check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
    """constant strategy with one constant per output column: every
    prediction row should be [1, 0]."""
    X = [[0], [0], [0], [0]]  # ignored
    y = np.array([[2, 3],
                  [1, 3],
                  [2, 3],
                  [2, 0]])
    n_samples = len(X)
    clf = DummyClassifier(strategy="constant", random_state=0,
                          constant=[1, 0])
    clf.fit(X, y)
    assert_array_equal(clf.predict(X),
                       np.hstack([np.ones((n_samples, 1)),
                                  np.zeros((n_samples, 1))]))
    _check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
    """constant strategy: missing or wrongly-shaped constant must raise."""
    X = [[0], [0], [0], [0]]  # ignored
    y = [2, 1, 2, 2]
    # First case: no constant supplied; second: two constants for a
    # single-output target. Both must raise ValueError at fit.
    for constant in (None, [2, 0]):
        kwargs = {} if constant is None else {'constant': constant}
        clf = DummyClassifier(strategy="constant", random_state=0, **kwargs)
        assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
    """class_prior_ must reflect sample weights, not raw counts."""
    X = [[0], [0], [1]]
    y = [0, 1, 0]
    sample_weight = [0.1, 1., 0.1]
    clf = DummyClassifier().fit(X, y, sample_weight)
    # Class 0 carries weight 0.1 + 0.1, class 1 carries weight 1.0.
    total_weight = 1.2
    assert_array_almost_equal(clf.class_prior_,
                              [0.2 / total_weight, 1. / total_weight])
def test_constant_strategy_sparse_target():
    """constant strategy must return sparse predictions for sparse y,
    with every row equal to the constant [1, 0]."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[0, 1],
                                [4, 0],
                                [1, 1],
                                [1, 4],
                                [1, 1]]))
    n_samples = len(X)
    clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
    clf.fit(X, y)
    y_pred = clf.predict(X)
    assert_true(sp.issparse(y_pred))
    assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
                                                    np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
    """uniform strategy on sparse y warns about memory and samples the
    observed labels 1, 2 and 4 roughly uniformly per column."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[2, 1],
                                [2, 2],
                                [1, 4],
                                [4, 2],
                                [1, 1]]))
    clf = DummyClassifier(strategy="uniform", random_state=0)
    assert_warns_message(UserWarning,
                         "the uniform strategy would not save memory",
                         clf.fit, X, y)
    # Predict on a large sample so empirical frequencies are stable.
    X = [[0]] * 500
    y_pred = clf.predict(X)
    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 1 / 3, decimal=1)
        assert_almost_equal(p[2], 1 / 3, decimal=1)
        assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
    """stratified strategy on sparse y: output stays sparse and the
    per-column label frequencies match the training distribution."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[4, 1],
                                [0, 0],
                                [1, 1],
                                [1, 4],
                                [1, 1]]))
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)
    # Predict on a large sample so empirical frequencies are stable.
    X = [[0]] * 500
    y_pred = clf.predict(X)
    assert_true(sp.issparse(y_pred))
    y_pred = y_pred.toarray()
    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        # Training frequencies per column: label 1 -> 3/5, 0 and 4 -> 1/5.
        assert_almost_equal(p[1], 3. / 5, decimal=1)
        assert_almost_equal(p[0], 1. / 5, decimal=1)
        assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
    """most_frequent/prior on sparse y: output stays sparse and every
    row is the per-column modal label [1, 0]."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[1, 0],
                                [1, 3],
                                [4, 0],
                                [0, 1],
                                [1, 0]]))
    n_samples = len(X)
    y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
    for strategy in ("most_frequent", "prior"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        y_pred = clf.predict(X)
        assert_true(sp.issparse(y_pred))
        assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
    """mean/median/quantile strategies must honour sample_weight."""
    rng = np.random.RandomState(seed=1)
    X = [[0]] * n_samples
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    # (strategy, extra kwargs, weighted oracle) triples, checked in order.
    cases = [
        ("mean", {}, np.average(y, weights=sample_weight)),
        ("median", {}, _weighted_percentile(y, sample_weight, 50.)),
        ("quantile", {"quantile": .95},
         _weighted_percentile(y, sample_weight, 95.)),
    ]
    for strategy, extra, expected in cases:
        est = DummyRegressor(strategy=strategy, **extra).fit(X, y,
                                                             sample_weight)
        assert_equal(est.constant_, expected)
| bsd-3-clause |
galtys/galtys-addons | html_reports/controllers/reports.py | 1 | 1044 | import openerp.addons.web.http as oeweb
import werkzeug.utils
import werkzeug.wrappers
import openerp
from openerp import pooler
from openerp import SUPERUSER_ID
from werkzeug.wrappers import Response
from mako.template import Template
from mako.runtime import Context
from StringIO import StringIO
from openerp.modules.module import get_module_resource
from openerp.modules.module import get_module_path
from mako.template import Template
from mako.runtime import Context
from StringIO import StringIO
import os
import datetime
import datetime as DT
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from matplotlib.dates import YEARLY, DAILY,WEEKLY,MONTHLY,DateFormatter, rrulewrapper, RRuleLocator, drange
import numpy as np
import datetime
import calendar
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
import matplotlib.dates as mdates
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
klusta-team/klustaviewa | klustaviewa/views/tests/test_correlogramsview.py | 2 | 1678 | """Unit tests for correlograms view."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import numpy as np
import numpy.random as rnd
import pandas as pd
from klustaviewa.views.tests.mock_data import (setup, teardown,
nspikes, nclusters, nsamples, nchannels, fetdim, ncorrbins, corrbin,
create_baselines, create_correlograms)
from kwiklib.dataio import KlustersLoader
from kwiklib.dataio.selection import select
from kwiklib.dataio.tools import check_dtype, check_shape
from klustaviewa import USERPREF
from klustaviewa.views import CorrelogramsView
from klustaviewa.views.tests.utils import show_view, get_data
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_correlogramsview():
    """Open a CorrelogramsView on mock data and exercise normalization."""
    data = get_data()
    clusters = data['clusters_selected']
    kwargs = {
        'clusters_selected': clusters,
        'cluster_colors': data['cluster_colors'],
        'correlograms': create_correlograms(clusters, ncorrbins),
        'baselines': create_baselines(clusters),
        'ncorrbins': ncorrbins,
        'corrbin': corrbin,
    }
    # Actions run against the view once shown; the last one closes the
    # window unless auto-close is disabled in the user preferences.
    kwargs['operators'] = [
        lambda self: self.view.change_normalization('uniform'),
        lambda self: self.view.change_normalization('row'),
        lambda self: (self.close()
                      if USERPREF['test_auto_close'] != False else None),
    ]
    # Show the view.
    show_view(CorrelogramsView, **kwargs)
| bsd-3-clause |
mquezada/tweets-summarizer | src/doc2vec.py | 1 | 1688 | import gensim
import numpy as np
from collections import namedtuple
from load_data import expanded_urls, df
from process_text import process, replace_map_url
from model_documents import docs
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from sklearn.cluster import KMeans
# NOTE(review): a bare ``docs`` expression used to sit here — a no-op
# REPL/notebook leftover with no effect in a script; removed.

# Regex matching a URL-ish token inside a tweet's text.
tweet_url_regex = r'http\S+'
# Boolean mask of tweets containing at least one URL (NaN text -> False).
ids_with_url = df.text.str.contains(tweet_url_regex, regex=True, na=False)
# Subset of the tweet dataframe restricted to tweets with URLs.
df_urls = df.loc[ids_with_url]
class ProcessedTweets:
    """Re-iterable stream of tokenised tweets.

    Each pass yields ``process(replace_map_url(text))`` for every
    document in the module-level ``docs`` mapping.
    """

    def __iter__(self):
        for raw_text in docs.values():
            yield process(replace_map_url(raw_text))
sentences = ProcessedTweets()

# Tagged-document container for gensim; create the type once, not inside
# the loop (the original rebuilt the namedtuple class every iteration).
AnalyzedDocument = namedtuple('AnalyzedDocument', 'words tags')

docs_list = []
for i, words in enumerate(sentences):
    # Each document is tagged with its position in the corpus.
    docs_list.append(AnalyzedDocument(words, [i]))

model = gensim.models.Doc2Vec(docs_list)

print(len(docs))
print(model.most_similar('nepal', topn=5))
print(model.most_similar('food', topn=5))
# BUG FIX: doesnt_match expects a sequence of words; passing the raw
# string made gensim iterate over single characters.
print(model.doesnt_match('nepal food asdf'.split()))

# urls = sorted(filter(lambda x: x.startswith('http'), model.vocab.keys()))
#
# km = KMeans(n_clusters=7)
# km.fit(model[urls])
#
# data = np.array([model[w] for w in model.vocab.keys()])
# words = [w for w in model.vocab.keys()]
# for i in range(5):
#     for j in range(len(model[urls][km.labels_ == i])):
#         print(model.similar_by_vector(model[urls][km.labels_ == i][j], topn=1))
#     print("=" * 10)
# print(model.most_similar('murder',topn=5))
# print(model.most_similar('pistoriu',topn=5))
# print(model.most_similar('trial',topn=5))
# print(model.most_similar('girlfriend',topn=5))
# print(model.most_similar('women',topn=5))
| gpl-3.0 |
evgchz/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2]  # we only take the first two features for visualization
y = iris.target

n_features = X.shape[1]

C = 1.0  # inverse regularization strength shared by all classifiers

# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
               'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
               'Linear SVC': SVC(kernel='linear', C=C, probability=True,
                                 random_state=0),
               'L2 logistic (Multinomial)': LogisticRegression(
                   C=C, solver='lbfgs', multi_class='multinomial'
               )}

n_classifiers = len(classifiers)

plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)

# Evaluation grid covering the observed range of the two features.
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]

for index, (name, classifier) in enumerate(classifiers.items()):
    classifier.fit(X, y)

    y_pred = classifier.predict(X)
    classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
    print("classif_rate for %s : %f " % (name, classif_rate))

    # View the predicted probabilities on the grid, one subplot per class.
    probas = classifier.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    for k in range(n_classes):
        plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
        plt.title("Class %d" % k)
        if k == 0:
            plt.ylabel(name)
        imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
                                   extent=(3, 9, 1, 5), origin='lower')
        plt.xticks(())
        plt.yticks(())
        # Overlay the training points predicted as class k.
        idx = (y_pred == k)
        if idx.any():
            plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')

ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')

plt.show()
| bsd-3-clause |
DerPhysikeR/pywbm | pywbm.py | 1 | 1782 | #!/usr/bin/env python
"""
2017-05-13 21:05:35
@author: Paul Reiter
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import hankel2
from pywbm import Subdomain
def vn(x, y, z, k):
    """Prescribed normal boundary velocity — currently zero everywhere.

    Alternative excitations kept from the original for reference:
      incident velocity on the left side:
        ``(x == 0).astype(complex)*1j/(k*z)``
      sin-shaped velocity on the bottom:
        ``(y == 0).astype(complex)*1j/(k*z)*(np.sin(np.pi*x/lx))``
      sin-shaped velocity on the left:
        ``(x == 0).astype(complex)*1j/(k*z)*(np.sin(np.pi*y/ly))``
      incident velocity on the bottom:
        ``(y == 0).astype(complex)*1j/(k*z)``
      incident velocity on left and bottom:
        ``np.logical_or((x == 0), (y == 0)).astype(complex)*1j/(k*z)``
    """
    # Complex zeros with the same shape as x (y, z, k unused for now).
    return np.zeros_like(x, dtype=complex)
def zn(x, y, z, k):
    """Boundary impedance: the constant ``z`` at every boundary point.

    (Commented variant from the original — impedance only on the right
    side: ``(x == 2).astype(complex)*z``.)
    """
    ones = np.ones_like(x)
    return ones * z
def pp(x, y, k):
    """Pressure field of a point source at (0.5, 0.5): H0^(2)(k*r)."""
    dx = x - .5
    dy = y - .5
    r = np.sqrt(dx ** 2 + dy ** 2)
    return hankel2(0, k * r)
def gpp(n, x, y, k):
    """Directional derivative of :func:`pp` along the unit normal ``n``.

    Uses H0'(s) = H_{-1}(s)/2 - H_1(s)/2 for the Hankel function of the
    second kind; the derivative factor is computed once instead of twice.
    """
    dx = x - .5
    dy = y - .5
    r = np.sqrt(dx ** 2 + dy ** 2)
    kr = k * r
    dh0 = hankel2(-1, kr) / 2 - hankel2(1, kr) / 2
    return k * (n[0] * dx / r * dh0 + n[1] * dy / r * dh0)
if __name__ == '__main__':
    z = 1.205*343.4  # specific impedance rho*c of air (presumably at ~20 C)
    k = 2*np.pi*800/343.4  # wavenumber for an 800 Hz tone
    lx, ly, n = 2, 1, 20  # domain size and number of wave functions
    # Rectangular domain: four corner nodes and four boundary edges.
    nodes = [(0, 0), (lx, 0), (lx, ly), (0, ly)]
    elements = [(0, 1), (1, 2), (2, 3), (3, 0)]
    # All four edges use the impedance ('z') boundary condition.
    kinds = ['z', 'z', 'z', 'z']
    functions = [zn, zn, zn, zn]

    sd = Subdomain(nodes, elements, kinds, functions, [(pp, gpp)])
    sd.solve(z, k, n, vn)

    x, y = np.meshgrid(np.linspace(0, lx, 84), np.linspace(0, ly, 44))
    # NOTE(review): `z` is re-bound here from impedance to the real part
    # of the field solution — confusing but intentional in the original.
    z = np.real(sd.field_solution(x, y, z, k, n, vn))
    plt.contourf(x, y, z)
    plt.colorbar()
    plt.show()
| gpl-3.0 |
abimannans/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Evaluation grid over [-3, 3] x [-3, 3].
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
                     np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
# Target is the XOR of the two input signs.
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)

# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)

# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
           origin='lower', cmap=plt.cm.PuOr_r)
# BUG FIX: matplotlib's contour spells the dash option ``linestyles``;
# the original ``linetypes`` kwarg was silently ignored, so the decision
# boundary was drawn solid instead of dashed.
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/io/clipboard.py | 14 | 2947 | """ io on the clipboard """
from pandas import compat, get_option, option_context, DataFrame
from pandas.compat import StringIO
def read_clipboard(**kwargs):  # pragma: no cover
    r"""
    Read text from clipboard and pass to read_table. See read_table for the
    full argument list

    If unspecified, `sep` defaults to '\s+'

    Returns
    -------
    parsed : DataFrame
    """
    from pandas.util.clipboard import clipboard_get
    from pandas.io.parsers import read_table
    text = clipboard_get()

    # try to decode (if needed on PY3)
    # Strange. linux py33 doesn't complain, win py33 does
    if compat.PY3:
        try:
            text = compat.bytes_to_str(
                text, encoding=(kwargs.get('encoding') or
                                get_option('display.encoding'))
            )
        except Exception:
            # Best-effort decode only; fall back to the raw clipboard text.
            # (Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            pass

    # Excel copies into the clipboard with tab separation: inspect no more
    # than the first 10 lines; if they all contain the same nonzero number
    # of tabs, infer the text came from Excel and set 'sep' accordingly.
    lines = text[:10000].split('\n')[:-1][:10]

    # Need to remove leading white space, since read_table
    # accepts:
    #    a  b
    # 0  1  2
    # 1  3  4
    counts = set(x.lstrip().count('\t') for x in lines)
    if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
        kwargs['sep'] = '\t'

    if kwargs.get('sep') is None and kwargs.get('delim_whitespace') is None:
        kwargs['sep'] = '\s+'
    return read_table(StringIO(text), **kwargs)
def to_clipboard(obj, excel=None, sep=None, **kwargs):  # pragma: no cover
    """
    Attempt to write text representation of object to the system clipboard
    The clipboard can be then pasted into Excel for example.

    Parameters
    ----------
    obj : the object to write to the clipboard
    excel : boolean, defaults to True
            if True, use the provided separator, writing in a csv
            format for allowing easy pasting into excel.
            if False, write a string representation of the object
            to the clipboard
    sep : optional, defaults to tab
    other keywords are passed to to_csv

    Notes
    -----
    Requirements for your platform
      - Linux: xclip, or xsel (with gtk or PyQt4 modules)
      - Windows:
      - OS X:
    """
    from pandas.util.clipboard import clipboard_set
    if excel is None:
        excel = True

    if excel:
        try:
            if sep is None:
                sep = '\t'
            buf = StringIO()
            obj.to_csv(buf, sep=sep, **kwargs)
            clipboard_set(buf.getvalue())
            return
        except Exception:
            # Fall back to the plain string representation below.  (Was a
            # bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            pass

    if isinstance(obj, DataFrame):
        # str(df) has various unhelpful defaults, like truncation
        with option_context('display.max_colwidth', 999999):
            objstr = obj.to_string(**kwargs)
    else:
        objstr = str(obj)
    clipboard_set(objstr)
| mit |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/tests/test_suffix_filter.py | 1 | 25219 | import unittest
from nose.tools import assert_equal, assert_list_equal, nottest, raises
from py_stringmatching.tokenizer.delimiter_tokenizer import DelimiterTokenizer
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
import numpy as np
import pandas as pd
from py_stringsimjoin.filter.suffix_filter import SuffixFilter
from py_stringsimjoin.utils.converter import dataframe_column_to_str
from py_stringsimjoin.utils.generic_helper import COMP_OP_MAP, \
remove_redundant_attrs
from py_stringsimjoin.utils.simfunctions import get_sim_function
# test SuffixFilter.filter_pair method
class FilterPairTestCases(unittest.TestCase):
    """Unit tests for SuffixFilter.filter_pair: prune (True) / pass (False)
    decisions for JACCARD, COSINE, DICE, OVERLAP and EDIT_DISTANCE, plus
    empty-string handling with and without allow_empty."""

    def setUp(self):
        # Whitespace tokenizer and 2-gram tokenizer shared by the tests.
        self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.qg2 = QgramTokenizer(2)

    # tests for JACCARD measure
    def test_jac_dlm_08_prune(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx yy cc zz ww',
                              self.dlm, 'JACCARD', 0.8, False, False, True)

    def test_jac_dlm_08_pass(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx aa cc dd ee',
                              self.dlm, 'JACCARD', 0.8, False, False, False)

    # tests for COSINE measure
    def test_cos_dlm_08_prune(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx yy cc zz ww',
                              self.dlm, 'COSINE', 0.8, False, False, True)

    def test_cos_dlm_08_pass(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx aa cc dd ee',
                              self.dlm, 'COSINE', 0.8, False, False, False)

    # tests for DICE measure
    def test_dice_dlm_08_prune(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx yy cc zz ww',
                              self.dlm, 'DICE', 0.8, False, False, True)

    def test_dice_dlm_08_pass(self):
        self.test_filter_pair('aa bb cc dd ee', 'xx aa cc dd ee',
                              self.dlm, 'DICE', 0.8, False, False, False)

    # tests for OVERLAP measure
    def test_overlap_dlm_2_prune(self):
        self.test_filter_pair('dd ee', 'yy zz',
                              self.dlm, 'OVERLAP', 2, False, False, True)

    def test_overlap_dlm_2_pass(self):
        self.test_filter_pair('dd zz', 'yy zz',
                              self.dlm, 'OVERLAP', 2, False, False, False)

    def test_overlap_dlm_empty(self):
        self.test_filter_pair('', '',
                              self.dlm, 'OVERLAP', 1, False, False, True)

    def test_overlap_dlm_empty_with_allow_empty(self):
        # OVERLAP ignores allow_empty: empty strings are still pruned.
        self.test_filter_pair('', '',
                              self.dlm, 'OVERLAP', 1, True, False, True)

    # tests for EDIT_DISTANCE measure
    def test_edit_dist_qg2_prune(self):
        self.test_filter_pair('67126790', '26123485',
                              self.qg2, 'EDIT_DISTANCE', 1, False, False, True)

    def test_edit_dist_qg2_pass(self):
        self.test_filter_pair('128690', '129695',
                              self.qg2, 'EDIT_DISTANCE', 2, False, False, False)

    def test_edit_dist_qg2_empty(self):
        self.test_filter_pair('', '',
                              self.qg2, 'EDIT_DISTANCE', 1, False, False, False)

    def test_edit_dist_qg2_empty_with_allow_empty(self):
        self.test_filter_pair('', '',
                              self.qg2, 'EDIT_DISTANCE', 1, True, False, False)

    def test_edit_dist_qg2_no_padding_empty(self):
        self.test_filter_pair('', '', QgramTokenizer(2, padding=False),
                              'EDIT_DISTANCE', 1, False, False, False)

    # tests for empty string input
    def test_empty_lstring(self):
        self.test_filter_pair('ab', '', self.dlm, 'JACCARD', 0.8,
                              False, False, True)

    def test_empty_rstring(self):
        self.test_filter_pair('', 'ab', self.dlm, 'JACCARD', 0.8,
                              False, False, True)

    def test_empty_strings(self):
        self.test_filter_pair('', '', self.dlm, 'JACCARD', 0.8,
                              False, False, True)

    def test_empty_strings_with_allow_empty(self):
        self.test_filter_pair('', '', self.dlm, 'JACCARD', 0.8,
                              True, False, False)

    @nottest
    def test_filter_pair(self, lstring, rstring, tokenizer, sim_measure_type,
                         threshold, allow_empty, allow_missing,
                         expected_output):
        # Helper: build the filter and check the prune decision.
        suffix_filter = SuffixFilter(tokenizer, sim_measure_type, threshold,
                                     allow_empty, allow_missing)
        actual_output = suffix_filter.filter_pair(lstring, rstring)
        assert_equal(actual_output, expected_output)
# test SuffixFilter.filter_tables method
class FilterTablesTestCases(unittest.TestCase):
    """Unit tests for SuffixFilter.filter_tables: the candidate set must
    retain every pair satisfying the join predicate (the suffix filter may
    keep extra pairs, but never drop a true match).

    Review fixes applied:
      * ``test_jac_dlm_075_with_njobs_above_1`` was defined twice with an
        identical body; the second definition silently shadowed the first.
        The duplicate has been removed.
      * ``test_edit_distance_qg2_2`` built an ``expected_pairs`` set that
        was never used (the helper recomputes expectations); removed.
    """

    def setUp(self):
        self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.A = pd.DataFrame([{'id': 1, 'attr': 'ab cd ef aa bb'},
                               {'id': 2, 'attr': ''},
                               {'id': 3, 'attr': 'ab'},
                               {'id': 4, 'attr': 'll oo he'},
                               {'id': 5, 'attr': 'xy xx zz fg'},
                               {'id': 6, 'attr': np.NaN},
                               {'id': 7, 'attr': ''}])
        self.B = pd.DataFrame([{'id': 1, 'attr': 'zz fg xx'},
                               {'id': 2, 'attr': 'he ll'},
                               {'id': 3, 'attr': 'xy pl ou'},
                               {'id': 4, 'attr': 'aa'},
                               {'id': 5, 'attr': 'fg cd aa ef ab'},
                               {'id': 6, 'attr': None},
                               {'id': 7, 'attr': ' '}])
        self.empty_table = pd.DataFrame(columns=['id', 'attr'])
        self.default_l_out_prefix = 'l_'
        self.default_r_out_prefix = 'r_'

    # tests for JACCARD measure
    def test_jac_dlm_075(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    def test_jac_dlm_075_with_out_attrs(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['id', 'attr'], ['id', 'attr']))

    def test_jac_dlm_075_with_out_prefix(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['attr'], ['attr'],
                                 'ltable.', 'rtable.'))

    # tests for COSINE measure
    def test_cos_dlm_08(self):
        self.test_filter_tables(self.dlm, 'COSINE', 0.8, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    # tests for DICE measure
    def test_dice_dlm_08(self):
        self.test_filter_tables(self.dlm, 'DICE', 0.8, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    # tests for OVERLAP measure
    def test_overlap_dlm_3(self):
        self.test_filter_tables(self.dlm, 'OVERLAP', 3, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    # tests for EDIT_DISTANCE measure
    def test_edit_distance_qg2_2(self):
        A = pd.DataFrame([{'l_id': 1, 'l_attr': '19990'},
                          {'l_id': 2, 'l_attr': '200'},
                          {'l_id': 3, 'l_attr': '0'},
                          {'l_id': 4, 'l_attr': ''},
                          {'l_id': 5, 'l_attr': np.NaN}])
        B = pd.DataFrame([{'r_id': 1, 'r_attr': '200155'},
                          {'r_id': 2, 'r_attr': '190'},
                          {'r_id': 3, 'r_attr': '2010'},
                          {'r_id': 4, 'r_attr': ''},
                          {'r_id': 5, 'r_attr': np.NaN},
                          {'r_id': 6, 'r_attr': '18950'}])
        qg2_tok = QgramTokenizer(2)
        # The helper derives the expected pairs itself; for reference the
        # true matches here are 1,2  1,6  2,2  2,3  3,2  4,4.
        self.test_filter_tables(qg2_tok, 'EDIT_DISTANCE', 2, False, False,
                                (A, B,
                                 'l_id', 'r_id', 'l_attr', 'r_attr'))

    # test with n_jobs above 1
    def test_jac_dlm_075_with_njobs_above_1(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, False, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['attr'], ['attr'],
                                 'ltable.', 'rtable.', 2))

    # test filter attribute of type int
    def test_jac_qg2_with_filter_attr_of_type_int(self):
        A = pd.DataFrame([{'l_id': 1, 'l_attr': 1990},
                          {'l_id': 2, 'l_attr': 2000},
                          {'l_id': 3, 'l_attr': 0},
                          {'l_id': 4, 'l_attr': -1},
                          {'l_id': 5, 'l_attr': 1986}])
        B = pd.DataFrame([{'r_id': 1, 'r_attr': 2001},
                          {'r_id': 2, 'r_attr': 1992},
                          {'r_id': 3, 'r_attr': 1886},
                          {'r_id': 4, 'r_attr': 2007},
                          {'r_id': 5, 'r_attr': 2012}])
        # Int columns must be converted to string before q-gram joining.
        dataframe_column_to_str(A, 'l_attr', inplace=True)
        dataframe_column_to_str(B, 'r_attr', inplace=True)
        qg2_tok = QgramTokenizer(2, return_set=True)
        self.test_filter_tables(qg2_tok, 'JACCARD', 0.3, False, False,
                                (A, B,
                                 'l_id', 'r_id', 'l_attr', 'r_attr'))

    # test allow_missing flag
    def test_jac_dlm_075_allow_missing(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, False, True,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    # test allow_empty flag
    def test_jac_dlm_075_allow_empty(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, True, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr'))

    # test allow_empty flag with output attributes
    def test_jac_dlm_075_allow_empty_with_out_attrs(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.75, True, False,
                                (self.A, self.B,
                                 'id', 'id', 'attr', 'attr',
                                 ['attr'], ['attr']))

    # NOTE(review): a second, byte-identical definition of
    # test_jac_dlm_075_with_njobs_above_1 used to sit here; it shadowed
    # the one above and has been removed.

    # tests for empty table input
    def test_empty_ltable(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.empty_table, self.B,
                                 'id', 'id', 'attr', 'attr'))

    def test_empty_rtable(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.A, self.empty_table,
                                 'id', 'id', 'attr', 'attr'))

    def test_empty_tables(self):
        self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
                                (self.empty_table, self.empty_table,
                                 'id', 'id', 'attr', 'attr'))

    @nottest
    def test_filter_tables(self, tokenizer, sim_measure_type, threshold,
                           allow_empty, allow_missing, args):
        """Helper: run filter_tables and verify (a) the output schema and
        (b) that every true join-result pair survives the filter."""
        suffix_filter = SuffixFilter(tokenizer, sim_measure_type, threshold,
                                     allow_empty, allow_missing)
        sim_fn = get_sim_function(sim_measure_type)

        # compute the join output pairs
        join_output_pairs = set()
        for l_idx, l_row in args[0].iterrows():
            for r_idx, r_row in args[1].iterrows():
                # if allow_missing is set to True, then add pairs containing
                # missing value to the join output.
                if pd.isnull(l_row[args[4]]) or pd.isnull(r_row[args[5]]):
                    if allow_missing:
                        join_output_pairs.add(','.join((str(l_row[args[2]]),
                                                        str(r_row[args[3]]))))
                    continue

                if sim_measure_type == 'EDIT_DISTANCE':
                    l_join_val = str(l_row[args[4]])
                    r_join_val = str(r_row[args[5]])
                    comp_fn = COMP_OP_MAP['<=']
                else:
                    l_join_val = tokenizer.tokenize(str(l_row[args[4]]))
                    r_join_val = tokenizer.tokenize(str(r_row[args[5]]))
                    comp_fn = COMP_OP_MAP['>=']

                if (len(l_join_val) == 0 and len(r_join_val) == 0 and
                        sim_measure_type not in ['OVERLAP', 'EDIT_DISTANCE']):
                    if allow_empty:
                        join_output_pairs.add(','.join((str(l_row[args[2]]),
                                                        str(r_row[args[3]]))))
                    continue

                # if both attributes are not missing and not empty, then check
                # if the pair satisfies the join condition. If yes, then add it
                # to the join output.
                if comp_fn(sim_fn(l_join_val, r_join_val), threshold):
                    join_output_pairs.add(','.join((str(l_row[args[2]]),
                                                    str(r_row[args[3]]))))

        actual_candset = suffix_filter.filter_tables(*args)

        expected_output_attrs = ['_id']
        l_out_prefix = self.default_l_out_prefix
        r_out_prefix = self.default_r_out_prefix

        # Check for l_out_prefix in args.
        if len(args) > 8:
            l_out_prefix = args[8]
        expected_output_attrs.append(l_out_prefix + args[2])

        # Check for r_out_prefix in args.
        if len(args) > 9:
            r_out_prefix = args[9]
        expected_output_attrs.append(r_out_prefix + args[3])

        # Check for l_out_attrs in args.
        if len(args) > 6:
            if args[6]:
                l_out_attrs = remove_redundant_attrs(args[6], args[2])
                for attr in l_out_attrs:
                    expected_output_attrs.append(l_out_prefix + attr)

        # Check for r_out_attrs in args.
        if len(args) > 7:
            if args[7]:
                r_out_attrs = remove_redundant_attrs(args[7], args[3])
                for attr in r_out_attrs:
                    expected_output_attrs.append(r_out_prefix + attr)

        # verify whether the output table has the necessary attributes.
        assert_list_equal(list(actual_candset.columns.values),
                          expected_output_attrs)

        actual_pairs = set()
        for idx, row in actual_candset.iterrows():
            actual_pairs.add(','.join((str(int(row[l_out_prefix + args[2]])),
                                       str(int(row[r_out_prefix + args[3]])))))

        # verify whether all the join output pairs are
        # present in the actual output pairs
        common_pairs = actual_pairs.intersection(join_output_pairs)
        assert_equal(len(common_pairs), len(join_output_pairs))
# test SuffixFilter.filter_candset method
class FilterCandsetTestCases(unittest.TestCase):
    """Tests for SuffixFilter.filter_candset.

    The candidate set is the full cartesian product A x B, so filtering it
    must keep exactly the pairs that survive the suffix filter for the given
    similarity measure and threshold.
    """

    def setUp(self):
        # whitespace tokenizer that returns a set of tokens per string
        self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
        # left table: includes an empty string (id 2) and a missing value (id 6)
        self.A = pd.DataFrame([{'l_id': 1, 'l_attr':'ab cd ef aa bb'},
                               {'l_id': 2, 'l_attr':''},
                               {'l_id': 3, 'l_attr':'ab'},
                               {'l_id': 4, 'l_attr':'ll oo he'},
                               {'l_id': 5, 'l_attr':'xy xx zz fg'},
                               {'l_id': 6, 'l_attr': np.NaN}])
        # right table: includes a None value (id 6)
        self.B = pd.DataFrame([{'r_id': 1, 'r_attr':'zz fg xx'},
                               {'r_id': 2, 'r_attr':'he ll'},
                               {'r_id': 3, 'r_attr':'xy pl ou'},
                               {'r_id': 4, 'r_attr':'aa'},
                               {'r_id': 5, 'r_attr':'fg cd aa ef ab'},
                               {'r_id': 6, 'r_attr':None}])

        # generate cartesian product A x B to be used as candset
        self.A['tmp_join_key'] = 1
        self.B['tmp_join_key'] = 1
        self.C = pd.merge(self.A[['l_id', 'tmp_join_key']],
                          self.B[['r_id', 'tmp_join_key']],
                          on='tmp_join_key').drop('tmp_join_key', 1)

        # empty variants for the empty-candset test below
        self.empty_A = pd.DataFrame(columns=['l_id', 'l_attr'])
        self.empty_B = pd.DataFrame(columns=['r_id', 'r_attr'])
        self.empty_candset = pd.DataFrame(columns=['l_id', 'r_id'])

    # tests for JACCARD measure
    def test_jac_dlm_075(self):
        expected_pairs = set(['1,5', '3,4', '5,1', '5,3'])
        self.test_filter_candset(self.dlm, 'JACCARD', 0.75, False, False,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # tests for COSINE measure
    def test_cos_dlm_08(self):
        expected_pairs = set(['1,5', '3,4', '4,2', '5,1', '5,3'])
        self.test_filter_candset(self.dlm, 'COSINE', 0.8, False, False,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # tests for DICE measure
    def test_dice_dlm_08(self):
        expected_pairs = set(['1,5', '3,4', '4,2', '5,1', '5,3'])
        self.test_filter_candset(self.dlm, 'DICE', 0.8, False, False,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # test allow_missing flag: with allow_missing=True every pair involving a
    # missing attribute (l_id 6 or r_id 6) must also survive the filter.
    def test_jac_dlm_075_allow_missing(self):
        expected_pairs = set(['1,5', '3,4', '5,1', '5,3',
                              '6,1', '6,2', '6,3', '6,4', '6,5',
                              '6,6', '1,6', '2,6', '3,6', '4,6', '5,6'])
        self.test_filter_candset(self.dlm, 'JACCARD', 0.75, False, True,
                                 (self.C, 'l_id', 'r_id',
                                  self.A, self.B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # tests for empty candset input
    def test_empty_candset(self):
        expected_pairs = set()
        self.test_filter_candset(self.dlm, 'JACCARD', 0.8, False, False,
                                 (self.empty_candset, 'l_id', 'r_id',
                                  self.empty_A, self.empty_B,
                                  'l_id', 'r_id', 'l_attr', 'r_attr'),
                                 expected_pairs)

    # shared driver; @nottest stops the test runner collecting it directly.
    # `args` is the positional argument tuple forwarded to filter_candset:
    # (candset, candset_l_key, candset_r_key, ltable, rtable,
    #  l_key_attr, r_key_attr, l_filter_attr, r_filter_attr)
    @nottest
    def test_filter_candset(self, tokenizer, sim_measure_type, threshold,
                            allow_empty, allow_missing, args, expected_pairs):
        suffix_filter = SuffixFilter(tokenizer, sim_measure_type, threshold,
                                     allow_empty, allow_missing)
        actual_output_candset = suffix_filter.filter_candset(*args)

        # verify whether the output table has the necessary attributes.
        assert_list_equal(list(actual_output_candset.columns.values),
                          list(args[0].columns.values))

        actual_pairs = set()
        for idx, row in actual_output_candset.iterrows():
            actual_pairs.add(','.join((str(row[args[1]]), str(row[args[2]]))))

        # verify whether the actual pairs and the expected pairs match.
        assert_equal(len(expected_pairs), len(actual_pairs))
        common_pairs = actual_pairs.intersection(expected_pairs)
        assert_equal(len(common_pairs), len(expected_pairs))
class SuffixFilterInvalidTestCases(unittest.TestCase):
    """Verify that SuffixFilter rejects malformed tables, attributes,
    tokenizers, similarity measures and thresholds with the expected
    exception types."""

    def setUp(self):
        # shared, valid building blocks; each test corrupts exactly one input
        self.tokenizer = DelimiterTokenizer(delim_set=[' '], return_set=True)
        self.sim_measure_type = 'JACCARD'
        self.threshold = 0.8
        self.A = pd.DataFrame([{'A.id':1, 'A.attr':'hello', 'A.int_attr':5}])
        self.B = pd.DataFrame([{'B.id':1, 'B.attr':'world', 'B.int_attr':6}])

    @raises(TypeError)
    def test_invalid_ltable(self):
        # left table is a list, not a DataFrame
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, self.threshold)
        sf.filter_tables([], self.B, 'A.id', 'B.id', 'A.attr', 'B.attr')

    @raises(TypeError)
    def test_invalid_rtable(self):
        # right table is a list, not a DataFrame
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, self.threshold)
        sf.filter_tables(self.A, [], 'A.id', 'B.id', 'A.attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_l_key_attr(self):
        # left key attribute does not exist in the left table
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, self.threshold)
        sf.filter_tables(self.A, self.B, 'A.invalid_id', 'B.id',
                         'A.attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_r_key_attr(self):
        # right key attribute does not exist in the right table
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, self.threshold)
        sf.filter_tables(self.A, self.B, 'A.id', 'B.invalid_id',
                         'A.attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_l_filter_attr(self):
        # left filter attribute does not exist in the left table
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, self.threshold)
        sf.filter_tables(self.A, self.B, 'A.id', 'B.id',
                         'A.invalid_attr', 'B.attr')

    @raises(AssertionError)
    def test_invalid_r_filter_attr(self):
        # right filter attribute does not exist in the right table
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, self.threshold)
        sf.filter_tables(self.A, self.B, 'A.id', 'B.id',
                         'A.attr', 'B.invalid_attr')

    @raises(AssertionError)
    def test_numeric_l_filter_attr(self):
        # left filter attribute is numeric; string attribute required
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, self.threshold)
        sf.filter_tables(self.A, self.B, 'A.id', 'B.id',
                         'A.int_attr', 'B.attr')

    @raises(AssertionError)
    def test_numeric_r_filter_attr(self):
        # right filter attribute is numeric; string attribute required
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, self.threshold)
        sf.filter_tables(self.A, self.B, 'A.id', 'B.id',
                         'A.attr', 'B.int_attr')

    @raises(AssertionError)
    def test_invalid_l_out_attr(self):
        # requested left output attribute is missing from the left table
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, self.threshold)
        sf.filter_tables(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
                         ['A.invalid_attr'], ['B.attr'])

    @raises(AssertionError)
    def test_invalid_r_out_attr(self):
        # requested right output attribute is missing from the right table
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, self.threshold)
        sf.filter_tables(self.A, self.B, 'A.id', 'B.id', 'A.attr', 'B.attr',
                         ['A.attr'], ['B.invalid_attr'])

    @raises(TypeError)
    def test_invalid_tokenizer(self):
        # tokenizer must be a Tokenizer instance, not a list
        sf = SuffixFilter([], self.sim_measure_type, self.threshold)

    @raises(AssertionError)
    def test_invalid_tokenizer_for_edit_distance(self):
        # edit distance requires a character-level tokenizer
        sf = SuffixFilter(self.tokenizer, 'EDIT_DISTANCE', 2)

    @raises(TypeError)
    def test_invalid_sim_measure_type(self):
        # unknown similarity measure name
        sf = SuffixFilter(self.tokenizer, 'INVALID_TYPE', self.threshold)

    @raises(AssertionError)
    def test_invalid_threshold(self):
        # threshold for JACCARD must lie in (0, 1]
        sf = SuffixFilter(self.tokenizer, self.sim_measure_type, 1.2)
| bsd-3-clause |
murali-munna/scikit-learn | examples/model_selection/plot_roc.py | 146 | 3697 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Binarize the output: one indicator column per class, so each class can be
# treated as an independent binary problem for ROC purposes.
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]

# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]

# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=0)

# Learn to predict each class against the other (one-vs-rest); decision
# function scores are used to sweep the ROC thresholds.
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)

# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])

# Compute micro-average ROC curve and ROC area: flatten the label-indicator
# matrix so every (sample, class) element counts as one binary prediction.
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

# Plot of a ROC curve for a specific class (class index 2)
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')  # diagonal = chance-level classifier
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()

# Plot ROC curves for the micro-average and for every class
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]))
for i in range(n_classes):
    plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
                                   ''.format(i, roc_auc[i]))

plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
etkirsch/scikit-learn | sklearn/semi_supervised/label_propagation.py | 71 | 15342 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supprots RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
                                              ClassifierMixin)):
    """Base class for label propagation module.

    Subclasses implement :meth:`_build_graph`; this class provides the shared
    iterative propagation loop (``fit``) and inductive inference
    (``predict`` / ``predict_proba``).

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported..

    gamma : float
        Parameter for rbf kernel

    alpha : float
        Clamping factor

    max_iter : float
        Change maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    n_neighbors : integer > 0
        Parameter for knn kernel
    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 alpha=1, max_iter=30, tol=1e-3):
        self.max_iter = max_iter
        self.tol = tol

        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors

        # clamping factor
        self.alpha = alpha

    def _get_kernel(self, X, y=None):
        # With y=None, return the full train-vs-train affinity structure;
        # otherwise return train-vs-y weights (used at prediction time).
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            # lazily fit the nearest-neighbors index on first use
            if self.nn_fit is None:
                self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
            if y is None:
                # sparse connectivity graph over the training points
                return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                    self.n_neighbors,
                                                    mode='connectivity')
            else:
                # for new points, return neighbor indices (not a matrix)
                return self.nn_fit.kneighbors(y, return_distance=False)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " are supported at this time" % self.kernel)

    @abstractmethod
    def _build_graph(self):
        # Subclasses must return the propagation matrix used in fit().
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")

    def predict(self, X):
        """Performs inductive inference across the model.

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        y : array_like, shape = [n_samples]
            Predictions for input data
        """
        # argmax over the class-probability distribution per sample
        probas = self.predict_proba(X)
        return self.classes_[np.argmax(probas, axis=1)].ravel()

    def predict_proba(self, X):
        """Predict probability for each possible outcome.

        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        check_is_fitted(self, 'X_')

        X_2d = check_array(X, accept_sparse = ['csc', 'csr', 'coo', 'dok',
                                               'bsr', 'lil', 'dia'])
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            # knn kernel returns neighbor indices per query point: sum the
            # label distributions of each point's training neighbors.
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            # rbf kernel returns a dense weight matrix: weighted combination
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        # normalize each row to sum to 1
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities

    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based

        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this

        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X
        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()

        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])  # -1 marks unlabeled samples
        self.classes_ = classes

        n_samples, n_classes = len(y), len(classes)

        y = np.asarray(y)
        unlabeled = y == -1
        # labeled points are fully clamped (weight 1); unlabeled points
        # are re-estimated each iteration, damped by alpha
        clamp_weights = np.ones((n_samples, 1))
        clamp_weights[unlabeled, 0] = self.alpha

        # initialize distributions: one-hot rows for labeled samples
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1

        y_static = np.copy(self.label_distributions_)
        if self.alpha > 0.:
            y_static *= 1 - self.alpha
        y_static[unlabeled] = 0

        l_previous = np.zeros((self.X_.shape[0], n_classes))

        remaining_iter = self.max_iter
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()
        # iterate propagation until the distributions stop changing (within
        # tol) or the iteration budget is exhausted
        while (_not_converged(self.label_distributions_, l_previous, self.tol)
                and remaining_iter > 1):
            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            # clamp
            self.label_distributions_ = np.multiply(
                clamp_weights, self.label_distributions_) + y_static
            remaining_iter -= 1

        # normalize rows into proper categorical distributions
        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer
        # set the transduction item: hard label for every training sample
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        self.n_iter_ = self.max_iter - remaining_iter
        return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported..

    gamma : float
        Parameter for rbf kernel

    n_neighbors : integer > 0
        Parameter for knn kernel

    alpha : float
        Clamping factor

    max_iter : float
        Change maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)

    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf

    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """

    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample

        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            # reset so _get_kernel refits the neighbors index on this X_
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        # column sums used to normalize each column of the affinity matrix
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            # NOTE(review): dividing the flat .data array by
            # np.diag(np.array(normalizer)) looks suspicious -- the shapes
            # only line up in special cases. Confirm against a fixed version
            # of scikit-learn before relying on the sparse (knn) path.
            affinity_matrix.data /= np.diag(np.array(normalizer))
        else:
            # dense case: divide each column by its sum (normalizer is a
            # row vector; broadcasting via the transpose-style indexing)
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning

    This model is similar to the basic Label Propgation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.

    gamma : float
        parameter for rbf kernel

    n_neighbors : integer > 0
        parameter for knn kernel

    alpha : float
        clamping factor

    max_iter : float
        maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)

    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219

    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3):
        # this one has different base parameters (alpha defaults to soft
        # clamping at 0.2 instead of hard clamping at 1)
        super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
                                             n_neighbors=n_neighbors,
                                             alpha=alpha, max_iter=max_iter,
                                             tol=tol)

    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == 'knn':
            # reset so _get_kernel refits the neighbors index on this X_
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        # normalized graph Laplacian, negated for the propagation update
        laplacian = graph_laplacian(affinity_matrix, normed=True)
        laplacian = -laplacian
        if sparse.isspmatrix(laplacian):
            # zero out the diagonal entries of the sparse (COO) Laplacian
            diag_mask = (laplacian.row == laplacian.col)
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/plot_digits_pipe.py | 70 | 1813 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: GaΓ«l Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# Pipeline: unsupervised PCA for dimensionality reduction, then logistic
# regression for classification.
logistic = linear_model.LogisticRegression()

pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])

digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target

###############################################################################
# Plot the PCA spectrum (explained variance per component)
pca.fit(X_digits)

plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')

###############################################################################
# Prediction: grid-search jointly over the number of PCA components and the
# logistic-regression regularization strength C.

n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)

# Parameters of pipelines can be set using '__' separated parameter names:
estimator = GridSearchCV(pipe,
                         dict(pca__n_components=n_components,
                              logistic__C=Cs))
estimator.fit(X_digits, y_digits)

# mark the chosen number of components on the spectrum plot
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
            linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
mavarick/jieba | test/extract_topic.py | 65 | 1463 | import sys
sys.path.append("../")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import decomposition
import jieba
import time
import glob
import sys
import os
import random
# Command line: extract_topic.py directory [n_topic] [n_top_words]
if len(sys.argv)<2:
    print("usage: extract_topic.py directory [n_topic] [n_top_words]")
    sys.exit(0)

# defaults, optionally overridden by the 2nd and 3rd arguments
n_topic = 10
n_top_words = 25

if len(sys.argv)>2:
    n_topic = int(sys.argv[2])

if len(sys.argv)>3:
    n_top_words = int(sys.argv[3])

count_vect = CountVectorizer()
docs = []

pattern = os.path.join(sys.argv[1],"*.txt")
print("read "+pattern)

# read every .txt file in the directory; tokenize with jieba and join the
# tokens with spaces so CountVectorizer can split them back apart
for f_name in glob.glob(pattern):
    with open(f_name) as f:
        print("read file:", f_name)
        for line in f: #one line as a document
            words = " ".join(jieba.cut(line))
            docs.append(words)

random.shuffle(docs)

print("read done.")
print("transform")
# term counts -> TF-IDF weighting
counts = count_vect.fit_transform(docs)
tfidf = TfidfTransformer().fit_transform(counts)
print(tfidf.shape)

t0 = time.time()
print("training...")

# factorize the TF-IDF matrix into n_topic components (topics)
nmf = decomposition.NMF(n_components=n_topic).fit(tfidf)
print("done in %0.3fs." % (time.time() - t0))

# Inverse the vectorizer vocabulary to be able to map component indices
# back to the words they represent.
feature_names = count_vect.get_feature_names()

# print the top words of each topic (largest component weights first)
for topic_idx, topic in enumerate(nmf.components_):
    print("Topic #%d:" % topic_idx)
    print(" ".join([feature_names[i]
                    for i in topic.argsort()[:-n_top_words - 1:-1]]))
    print("")
| mit |
nicproulx/mne-python | examples/inverse/plot_label_from_stc.py | 31 | 3963 | """
=================================================
Generate a functional label from source estimates
=================================================
Threshold source estimates and produce a functional label. The label
is typically the region of interest that contains high values.
Here we compare the average time course in the anatomical label obtained
by FreeSurfer segmentation and the average time course from the
functional label. As expected the time course in the functional
label yields higher values.
"""
# Author: Luke Bloy <luke.bloy@gmail.com>
# Alex Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
# (redundant: subjects_dir was already assigned the same value above)
subjects_dir = data_path + '/subjects'
subject = 'sample'

# inverse-solution parameters: regularization from assumed SNR
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)

# Compute a label/ROI based on the peak power between 80 and 120 ms.
# The label bankssts-lh is used for the comparison.
aparc_label_name = 'bankssts-lh'
tmin, tmax = 0.080, 0.120

# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']  # get the source space

# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
                    pick_ori='normal')

# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()

# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
                                   subjects_dir=subjects_dir,
                                   regexp=aparc_label_name)[0]

# zero out sub-threshold activity before growing the functional label
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.

func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
                                  subjects_dir=subjects_dir, connected=True)

# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]

# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
                                        subjects_dir=subjects_dir,
                                        regexp=aparc_label_name)[0]

# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]

stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]

# flip the pca so that the max power between tmin and tmax is positive
pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))])

###############################################################################
# plot the time courses....
plt.figure()
plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
         label='Anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
         label='Functional %s' % aparc_label_name)
plt.legend()
plt.show()

###############################################################################
# plot brain in 3D with PySurfer if available
brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
brain.show_view('lateral')

# show both labels
brain.add_label(anat_label, borders=True, color='k')
brain.add_label(func_label, borders=True, color='b')
| bsd-3-clause |
DmitryYurov/BornAgain | Examples/Demos/simul_demo_lattice2.py | 2 | 2472 | '''
Simulation demo: Cylinder form factor without interference
'''
import numpy
import matplotlib
import math
from bornagain import *
M_PI = numpy.pi
# ----------------------------------
# describe sample and run simulation
# ----------------------------------
def RunSimulation():
    """Build a two-cylinder sample on a 2D square lattice and run a GISAS
    simulation; return the simulated intensity as a 2D numpy array."""
    # defining materials
    mAmbience = HomogeneousMaterial("Air", 0.0, 0.0 )
    mSubstrate = HomogeneousMaterial("Substrate", 6e-6, 2e-8 )
    mParticle = HomogeneousMaterial("Particle", 6e-4, 2e-8 )

    # particle 1: cylinder (radius 5 nm, height 5 nm) at the origin
    cylinder_ff = FormFactorCylinder(5*nanometer, 5*nanometer)
    cylinder = Particle(mParticle, cylinder_ff)
    position = kvector_t(0.0, 0.0, 0.0)
    cylinder.setPosition(position)
    particle_layout1 = ParticleLayout()
    particle_layout1.addParticle(cylinder, 1.0)

    # particle 2: same cylinder object repositioned and added to a second
    # layout. NOTE(review): this assumes addParticle() above took a copy of
    # the particle; if it stores a reference, both layouts would see the new
    # position -- confirm against the BornAgain API.
    position_2 = kvector_t(5.0*nanometer, 5.0*nanometer, 0.0)
    cylinder.setPosition(position_2)
    particle_layout2 = ParticleLayout()
    particle_layout2.addParticle(cylinder, 1.0)

    # interference function: 10 nm square lattice with Cauchy decay
    interference = InterferenceFunction2DLattice.createSquare(10.0*nanometer)
    pdf = FTDecayFunction2DCauchy(300.0*nanometer/2.0/M_PI, 100.0*nanometer/2.0/M_PI)
    interference.setDecayFunction(pdf)

    particle_layout1.setInterferenceFunction(interference)
    particle_layout2.setInterferenceFunction(interference)

    # top air layer carrying both particle layouts
    air_layer = Layer(mAmbience)
    air_layer.addLayout(particle_layout1)
    air_layer.addLayout(particle_layout2)
    # substrate layer
    substrate_layer = Layer(mSubstrate, 0)

    # multilayer: air on top of substrate
    multi_layer = MultiLayer()
    multi_layer.addLayer(air_layer)
    multi_layer.addLayer(substrate_layer)

    # build and run experiment: 100x100 detector bins, phi in [-2, 2] deg,
    # alpha in [0, 4] deg; 1 A beam at 0.2 deg grazing incidence
    simulation = GISASSimulation()
    simulation.setDetectorParameters(100, -2.0*degree, 2.0*degree, 100, 0.0*degree, 4.0*degree)
    simulation.setBeamParameters(1.0*angstrom, 0.2*degree, 0.0*degree)

    # run simulation
    simulation.setSample(multi_layer)
    simulation.runSimulation()
    return simulation.result().array()
#-------------------------------------------------------------
# main()
#-------------------------------------------------------------
if __name__ == '__main__':
    # Bug fix: the file only does ``import matplotlib`` at the top, so the
    # name ``plt`` used below was never bound and this block raised a
    # NameError at runtime. Bind pyplot locally before plotting.
    from matplotlib import pyplot as plt

    # run the simulation and show the intensity map on a log color scale
    # (+1 avoids log(0) for empty detector bins)
    result = RunSimulation()
    im = plt.imshow(result+1,
                    norm=matplotlib.colors.LogNorm(),
                    extent=[-2.0, 2.0, 0, 4.0])
    plt.colorbar(im)
    plt.xlabel(r'$\phi_f$', fontsize=20)
    plt.ylabel(r'$\alpha_f$', fontsize=20)
    plt.show()
| gpl-3.0 |
fdeheeger/mpld3 | mpld3/tests/test_elements.py | 16 | 5658 | """
Test creation of basic plot elements
"""
import numpy as np
import matplotlib.pyplot as plt
from .. import fig_to_dict, fig_to_html
from numpy.testing import assert_equal
def test_line():
    """Serialize a dashed black line and check the exported keys/styles."""
    fig, ax = plt.subplots()
    ax.plot(np.arange(10), np.random.random(10),
            '--k', alpha=0.3, zorder=10, lw=2)
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    line = axrep['lines'][0]
    # The serialized dict must expose exactly this set of keys.
    assert_equal(list(sorted(line.keys())),
                 ['alpha', 'color', 'coordinates', 'dasharray', 'data', 'id',
                  'linewidth', 'xindex', 'yindex', 'zorder'])
    assert_equal(line['alpha'], 0.3)
    assert_equal(line['color'], "#000000")
    assert_equal(line['coordinates'], 'data')
    # '--' dash style maps to an SVG dasharray of '6,6'
    assert_equal(line['dasharray'], '6,6')
    assert_equal(line['zorder'], 10)
    assert_equal(line['linewidth'], 2)
def test_markers():
    """Serialize triangle markers and check colors, widths and marker path."""
    fig, ax = plt.subplots()
    ax.plot(np.arange(10), np.random.random(10),
            '^k', alpha=0.3, zorder=10, mec='r', mew=2, c='b')
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    markers = axrep['markers'][0]
    assert_equal(list(sorted(markers.keys())),
                 ['alpha', 'coordinates', 'data', 'edgecolor', 'edgewidth',
                  'facecolor', 'id', 'markerpath', 'xindex', 'yindex',
                  'zorder'])
    assert_equal(markers['alpha'], 0.3)
    assert_equal(markers['zorder'], 10)
    assert_equal(markers['coordinates'], 'data')
    assert_equal(markers['edgecolor'], '#FF0000')
    assert_equal(markers['edgewidth'], 2)
    assert_equal(markers['facecolor'], '#0000FF')
    # '^' marker is exported as a closed 3-vertex SVG path
    assert_equal(markers['markerpath'][0],
                 [[0.0, -3.0], [-3.0, 3.0], [3.0, 3.0]])
    assert_equal(markers['markerpath'][1],
                 ['M', 'L', 'L', 'Z'])
def test_scatter():
    """Serialize a scatter collection and check per-point style arrays."""
    fig, ax = plt.subplots()
    ax.scatter(np.arange(10), np.random.random(10), c='r', s=30,
               marker='^', alpha=0.3, lw=2, edgecolors='b', zorder=10)
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    points = axrep['collections'][0]
    assert_equal(list(sorted(points.keys())),
                 ['alphas', 'edgecolors', 'edgewidths', 'facecolors', 'id',
                  'offsetcoordinates', 'offsets', 'pathcoordinates', 'paths',
                  'pathtransforms', 'xindex', 'yindex', 'zorder'])
    assert_equal(points['alphas'], [0.3])
    assert_equal(points['zorder'], 10)
    assert_equal(points['edgecolors'], ['#0000FF'])
    assert_equal(points['facecolors'], ['#FF0000'])
    assert_equal(points['edgewidths'], (2.0,))
    assert_equal(points['paths'][0][0],
                 [[0.0, 0.5], [-0.5, -0.5], [0.5, -0.5]])
    assert_equal(points['paths'][0][1],
                 ['M', 'L', 'L', 'Z'])
    # transform scale encodes the s=30 marker size (sqrt(30) ~ 5.48 scaled)
    assert_equal(points['pathtransforms'],
                 [[6.085806194501846, 0.0, 0.0, 6.085806194501846, 0.0, 0.0]])
def test_patch():
    """Serialize a Rectangle patch and check exported path properties."""
    fig, ax = plt.subplots()
    ax.add_patch(plt.Rectangle((0, 0), 1, 2, alpha=0.2, linewidth=2,
                               edgecolor='green', facecolor='red', zorder=3))
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    path = axrep['paths'][0]
    assert_equal(list(sorted(path.keys())),
                 ['alpha', 'coordinates', 'dasharray', 'data', 'edgecolor',
                  'edgewidth', 'facecolor', 'id', 'pathcodes',
                  'xindex', 'yindex', 'zorder'])
    assert_equal(path['alpha'], 0.2)
    assert_equal(path['edgecolor'], "#008000")
    assert_equal(path['facecolor'], "#FF0000")
    assert_equal(path['edgewidth'], 2)
    assert_equal(path['zorder'], 3)
def test_text():
    """Serialize a rotated text element and check exported attributes."""
    fig, ax = plt.subplots()
    ax.text(0.1, 0.1, "abcde", size=14, color='red', alpha=0.7,
            rotation=15, ha='center', va='center')
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    text = axrep['texts'][0]
    assert_equal(list(sorted(text.keys())),
                 ['alpha', 'color', 'coordinates', 'fontsize', 'h_anchor',
                  'id', 'position', 'rotation', 'text', 'v_baseline',
                  'zorder'])
    assert_equal(text['alpha'], 0.7)
    assert_equal(text['color'], "#FF0000")
    assert_equal(text['text'], "abcde")
    # SVG rotation runs clockwise, so the sign is flipped on export
    assert_equal(text['rotation'], -15)
    assert_equal(text['fontsize'], 14)
    assert_equal(text['position'], [0.1, 0.1])
    assert_equal(text['h_anchor'], 'middle')
    assert_equal(text['v_baseline'], 'central')
    assert_equal(text['zorder'], 3)
    assert_equal(text['coordinates'], "data")
def test_image():
    """Serialize an imshow image and check alpha, extent, zorder, coords."""
    fig, ax = plt.subplots()
    ax.imshow(np.random.random((20, 20)), cmap=plt.cm.binary,
              alpha=0.2, zorder=4, extent=(2, 4, 3, 5))
    rep = fig_to_dict(fig)
    axrep = rep['axes'][0]
    image = axrep['images'][0]
    # TODO: how to test data?
    assert_equal(list(sorted(image.keys())),
                 ['alpha', 'coordinates', 'data', 'extent', 'id', 'zorder'])
    assert_equal(image['alpha'], 0.2)
    assert_equal(image['extent'], (2, 4, 3, 5))
    assert_equal(image['zorder'], 4)
    assert_equal(image['coordinates'], "data")
def test_ticks():
    """Smoke-test HTML conversion for several xtick configurations.

    Conversion must not raise for default ticks, for integer-dtype tick
    arrays (as produced by pandas tslib), or for explicit custom
    positions + labels.
    """
    plt.xticks([1,2,3])
    rep = fig_to_html(plt.gcf())
    # TODO: use casperjs here if available to confirm that the xticks
    # are rendered as expected
    # pandas tslib generates ticks with unusual dtypes
    # test that they are converted to html successfully
    plt.xticks(np.array([1,2,3], dtype=np.int32))
    rep = fig_to_html(plt.gcf())
    # custom ticks should appear in the correct place, with the
    # correct text
    positions, labels = [0, 1, 10], ['A','B','C']
    # Bug fix: positions/labels were defined but never applied, so the
    # conversion below exercised nothing new.
    plt.xticks(positions, labels)
    rep = fig_to_html(plt.gcf())
    # TODO: use casperjs here if available to confirm that the xticks
    # are rendered as expected
| bsd-3-clause |
Myasuka/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
    # Test sample generation from mixture.sample_gaussian where covariance
    # is diagonal, spherical and full
    # (tolerances are loose because the checks are statistical: sample
    # moments of 300 draws vs. the requested moments)
    n_features, n_samples = 2, 300
    axis = 1
    mu = rng.randint(10) * rng.rand(n_features)
    cv = (rng.rand(n_features) + 1.0) ** 2
    samples = mixture.sample_gaussian(
        mu, cv, covariance_type='diag', n_samples=n_samples)
    assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
    assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
    # the same for spherical covariances
    cv = (rng.rand() + 1.0) ** 2
    samples = mixture.sample_gaussian(
        mu, cv, covariance_type='spherical', n_samples=n_samples)
    assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
    assert_true(np.allclose(
        samples.var(axis), np.repeat(cv, n_features), atol=1.5))
    # and for full covariances
    A = rng.randn(n_features, n_features)
    cv = np.dot(A.T, A) + np.eye(n_features)
    samples = mixture.sample_gaussian(
        mu, cv, covariance_type='full', n_samples=n_samples)
    assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
    assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
    # Numerical stability check: in SciPy 0.12.0 at least, eigh may return
    # tiny negative values in its second return value.
    from sklearn.mixture import sample_gaussian
    x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
                        covariance_type='full', random_state=42)
    print(x)
    assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
    # test a slow and naive implementation of lmvnpdf and
    # compare it to the vectorized version (mixture.lmvnpdf) to test
    # for correctness
    n_features, n_components, n_samples = 2, 3, 10
    mu = rng.randint(10) * rng.rand(n_components, n_features)
    cv = (rng.rand(n_components, n_features) + 1.0) ** 2
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    # naive per-component scipy computation is the ground truth here
    ref = _naive_lmvnpdf_diag(X, mu, cv)
    lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
    assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
    """Spherical covariances must match the diagonal reference with the
    scalar variance tiled across all features."""
    n_features, n_components, n_samples = 2, 3, 10
    mu = rng.randint(10) * rng.rand(n_components, n_features)
    spherecv = rng.rand(n_components, 1) ** 2 + 1
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    # expand the per-component scalar variance to a diagonal for the reference
    cv = np.tile(spherecv, (n_features, 1))
    reference = _naive_lmvnpdf_diag(X, mu, cv)
    lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
                                                  'spherical')
    assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
    """Full covariances built as diagonal matrices must match the diagonal
    reference implementation."""
    n_features, n_components, n_samples = 2, 3, 10
    mu = rng.randint(10) * rng.rand(n_components, n_features)
    cv = (rng.rand(n_components, n_features) + 1.0) ** 2
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    # promote each diagonal vector to a full (diagonal) matrix
    fullcv = np.array([np.diag(x) for x in cv])
    reference = _naive_lmvnpdf_diag(X, mu, cv)
    lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
    assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
    """A non-positive-definite full covariance must raise a ValueError
    with an explanatory message."""
    n_features, n_samples = 2, 10
    rng = np.random.RandomState(0)
    X = rng.randint(10) * rng.rand(n_samples, n_features)
    mu = np.mean(X, 0)
    # deliberately indefinite matrix (negative leading entry)
    cv = np.array([[[-1, 0], [0, 1]]])
    expected_message = "'covars' must be symmetric, positive-definite"
    assert_raise_message(ValueError, expected_message,
                         mixture.log_multivariate_normal_density,
                         X, mu, cv, 'full')
def test_GMM_attributes():
    """Setting/getting GMM attributes round-trips, and invalid covariance
    shapes or types raise ValueError."""
    n_components, n_features = 10, 4
    covariance_type = 'diag'
    g = mixture.GMM(n_components, covariance_type, random_state=rng)
    weights = rng.rand(n_components)
    weights = weights / weights.sum()
    means = rng.randint(-20, 20, (n_components, n_features))
    assert_true(g.n_components == n_components)
    assert_true(g.covariance_type == covariance_type)
    g.weights_ = weights
    assert_array_almost_equal(g.weights_, weights)
    g.means_ = means
    assert_array_almost_equal(g.means_, means)
    covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
    g.covars_ = covars
    assert_array_almost_equal(g.covars_, covars)
    # wrong shapes must be rejected
    assert_raises(ValueError, g._set_covars, [])
    assert_raises(ValueError, g._set_covars,
                  np.zeros((n_components - 2, n_features)))
    assert_raises(ValueError, mixture.GMM, n_components=20,
                  covariance_type='badcovariance_type')
class GMMTester():
    """Mixin exercising a GMM-like model for one covariance type.

    Concrete subclasses assign ``covariance_type`` and ``model`` and alias
    ``setUp = GMMTester._setUp`` so unittest discovers the fixtures.
    """
    # DPGMM subclasses set this False: eval cannot be tested there (see below)
    do_test_eval = True
    def _setUp(self):
        # Reference weights/means plus one covariance per supported type
        # for a 10-component, 4-feature mixture.
        self.n_components = 10
        self.n_features = 4
        self.weights = rng.rand(self.n_components)
        self.weights = self.weights / self.weights.sum()
        self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
        self.threshold = -0.5
        self.I = np.eye(self.n_features)
        self.covars = {
            'spherical': (0.1 + 2 * rng.rand(self.n_components,
                                             self.n_features)) ** 2,
            'tied': (make_spd_matrix(self.n_features, random_state=0)
                     + 5 * self.I),
            'diag': (0.1 + 2 * rng.rand(self.n_components,
                                        self.n_features)) ** 2,
            'full': np.array([make_spd_matrix(self.n_features, random_state=0)
                              + 5 * self.I for x in range(self.n_components)])}
    def test_eval(self):
        """score_samples must return per-sample log-likelihoods and
        responsibilities that identify the generating component."""
        if not self.do_test_eval:
            return  # DPGMM does not support setting the means and
        # covariances before fitting There is no way of fixing this
        # due to the variational parameters being more expressive than
        # covariance matrices
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type, random_state=rng)
        # Make sure the means are far apart so responsibilities.argmax()
        # picks the actual component used to generate the observations.
        g.means_ = 20 * self.means
        g.covars_ = self.covars[self.covariance_type]
        g.weights_ = self.weights
        gaussidx = np.repeat(np.arange(self.n_components), 5)
        n_samples = len(gaussidx)
        X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
        ll, responsibilities = g.score_samples(X)
        self.assertEqual(len(ll), n_samples)
        self.assertEqual(responsibilities.shape,
                         (n_samples, self.n_components))
        assert_array_almost_equal(responsibilities.sum(axis=1),
                                  np.ones(n_samples))
        assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
    def test_sample(self, n=100):
        """sample() must return n draws with the configured dimensionality."""
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type, random_state=rng)
        # Make sure the means are far apart so responsibilities.argmax()
        # picks the actual component used to generate the observations.
        g.means_ = 20 * self.means
        g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
        g.weights_ = self.weights
        samples = g.sample(n)
        self.assertEqual(samples.shape, (n, self.n_features))
    def test_train(self, params='wmc'):
        """EM iterations must (almost) monotonically increase likelihood."""
        g = mixture.GMM(n_components=self.n_components,
                        covariance_type=self.covariance_type)
        g.weights_ = self.weights
        g.means_ = self.means
        g.covars_ = 20 * self.covars[self.covariance_type]
        # Create a training set by sampling from the predefined distribution.
        X = g.sample(n_samples=100)
        g = self.model(n_components=self.n_components,
                       covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-1,
                       n_iter=1, init_params=params)
        g.fit(X)
        # Do one training iteration at a time so we can keep track of
        # the log likelihood to make sure that it increases after each
        # iteration.
        trainll = []
        for _ in range(5):
            g.params = params
            g.init_params = ''
            g.fit(X)
            trainll.append(self.score(g, X))
        g.n_iter = 10
        g.init_params = ''
        g.params = params
        g.fit(X)  # finish fitting
        # Note that the log likelihood will sometimes decrease by a
        # very small amount after it has more or less converged due to
        # the addition of min_covar to the covariance (to prevent
        # underflow). This is why the threshold is set to -0.5
        # instead of 0.
        delta_min = np.diff(trainll).min()
        self.assertTrue(
            delta_min > self.threshold,
            "The min nll increase is %f which is lower than the admissible"
            " threshold of %f, for model %s. The likelihoods are %s."
            % (delta_min, self.threshold, self.covariance_type, trainll))
    def test_train_degenerate(self, params='wmc'):
        # Train on degenerate data with 0 in some dimensions
        # Create a training set by sampling from the predefined distribution.
        X = rng.randn(100, self.n_features)
        X.T[1:] = 0
        g = self.model(n_components=2, covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-3, n_iter=5,
                       init_params=params)
        g.fit(X)
        trainll = g.score(X)
        self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
    def test_train_1d(self, params='wmc'):
        # Train on 1-D data
        # Create a training set by sampling from the predefined distribution.
        X = rng.randn(100, 1)
        # X.T[1:] = 0
        g = self.model(n_components=2, covariance_type=self.covariance_type,
                       random_state=rng, min_covar=1e-7, n_iter=5,
                       init_params=params)
        g.fit(X)
        trainll = g.score(X)
        if isinstance(g, mixture.DPGMM):
            self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
        else:
            self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
    def score(self, g, X):
        # total log-likelihood of X under fitted model g
        return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
    # GMMTester mixin specialized to spherical covariances
    covariance_type = 'spherical'
    model = mixture.GMM
    setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
    # GMMTester mixin specialized to diagonal covariances
    covariance_type = 'diag'
    model = mixture.GMM
    setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
    # GMMTester mixin specialized to a single covariance shared by components
    covariance_type = 'tied'
    model = mixture.GMM
    setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
    # GMMTester mixin specialized to full covariance matrices
    covariance_type = 'full'
    model = mixture.GMM
    setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits does not much worse than a single one
    X = rng.randn(30, 5)
    X[:10] += 2
    g = mixture.GMM(n_components=2, covariance_type='spherical',
                    random_state=rng, min_covar=1e-7, n_iter=5)
    train1 = g.fit(X).score(X).sum()
    g.n_init = 5
    train2 = g.fit(X).score(X).sum()
    # best-of-5 restarts should be at least as good (up to tiny slack)
    assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
    # Test that the right number of parameters is estimated
    n_samples, n_dim, n_components = 7, 5, 2
    X = rng.randn(n_samples, n_dim)
    # expected free-parameter counts per covariance type for (2, 5)-dim GMM
    n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
    for cv_type in ['full', 'tied', 'diag', 'spherical']:
        g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
                        random_state=rng, min_covar=1e-7, n_iter=1)
        g.fit(X)
        assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
    # Test all of the covariance_types return the same BIC score for
    # 1-dimensional, 1 component fits.
    n_samples, n_dim, n_components = 100, 1, 1
    X = rng.randn(n_samples, n_dim)
    # fit with 'full' as the reference BIC
    g_full = mixture.GMM(n_components=n_components, covariance_type='full',
                         random_state=rng, min_covar=1e-7, n_iter=1)
    g_full.fit(X)
    g_full_bic = g_full.bic(X)
    for cv_type in ['tied', 'diag', 'spherical']:
        g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
                        random_state=rng, min_covar=1e-7, n_iter=1)
        g.fit(X)
        assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
    """Assert fit(X).predict(X) and fit_predict(X) give identical clusterings
    (up to label permutation, via adjusted Rand index == 1)."""
    model2 = copy.deepcopy(model)
    predictions_1 = model.fit(X).predict(X)
    predictions_2 = model2.fit_predict(X)
    assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
    """
    test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
    """
    lrng = np.random.RandomState(101)
    n_samples, n_dim, n_comps = 100, 2, 2
    mu = np.array([[8, 8]])
    # two well-separated 2-D blobs
    component_0 = lrng.randn(n_samples, n_dim)
    component_1 = lrng.randn(n_samples, n_dim) + mu
    X = np.vstack((component_0, component_1))
    for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
        model = m_constructor(n_components=n_comps, covariance_type='full',
                              min_covar=1e-7, n_iter=5,
                              random_state=np.random.RandomState(0))
        assert_fit_predict_correct(model, X)
    # with zero iterations all points stay in the initial component
    model = mixture.GMM(n_components=n_comps, n_iter=0)
    z = model.fit_predict(X)
    assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
    # Test the aic and bic criteria
    n_samples, n_dim, n_components = 50, 3, 2
    X = rng.randn(n_samples, n_dim)
    SGH = 0.5 * (X.var() + np.log(2 * np.pi))  # standard gaussian entropy
    for cv_type in ['full', 'tied', 'diag', 'spherical']:
        g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
                        random_state=rng, min_covar=1e-7)
        g.fit(X)
        # analytic approximations of AIC/BIC for standard-normal data
        aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
        bic = (2 * n_samples * SGH * n_dim +
               np.log(n_samples) * g._n_parameters())
        bound = n_dim * 3. / np.sqrt(n_samples)
        assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
        assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
    r"""Test that covariance matrices do not become non positive definite
    Due to the accumulation of round-off errors, the computation of the
    covariance matrices during the learning phase could lead to non-positive
    definite covariance matrices. Namely the use of the formula:
    .. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
    instead of:
    .. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed a ``LinAlgError`` exception,
    when computing a ``GMM`` with full covariance matrices and fixed mean.
    This function ensures that some later optimization will not introduce the
    problem again.
    """
    # run for every covariance type via the test_positive_definite_covars
    # generator below
    rng = np.random.RandomState(1)
    # we build a dataset with 2 2d component. The components are unbalanced
    # (respective weights 0.9 and 0.1)
    X = rng.randn(100, 2)
    X[-10:] += (3, 3) # Shift the 10 last points
    gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
                      min_covar=1e-3)
    # This is a non-regression test for issue #2640. The following call used
    # to trigger:
    # numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
    gmm.fit(X)
    if covariance_type == "diag" or covariance_type == "spherical":
        assert_greater(gmm.covars_.min(), 0)
    else:
        if covariance_type == "tied":
            covs = [gmm.covars_]
        else:
            covs = gmm.covars_
        # positive determinant is used as the positive-definiteness proxy
        for c in covs:
            assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
    # Check positive definiteness for all covariance types
    # (nose-style generator test: yields one check per covariance type)
    for covariance_type in ["full", "tied", "diag", "spherical"]:
        yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
    """Fitting with verbose=1 must not raise; stdout is captured/discarded."""
    # Create sample data
    X = rng.randn(30, 5)
    X[:10] += 2
    g = mixture.GMM(n_components=2, n_init=2, verbose=1)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        g.fit(X)
    finally:
        sys.stdout = old_stdout
def test_verbose_second_level():
    """Fitting with verbose=2 must not raise; stdout is captured/discarded."""
    # Create sample data
    X = rng.randn(30, 5)
    X[:10] += 2
    g = mixture.GMM(n_components=2, n_init=2, verbose=2)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        g.fit(X)
    finally:
        sys.stdout = old_stdout
| bsd-3-clause |
mickypaganini/SSI2016-jet-clustering | hdb.py | 1 | 6413 | print(__doc__)
from sklearn import metrics
import numpy as np
from read_data import read_data
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from itertools import cycle
import os
from sklearn.neighbors import DistanceMetric
def hdb(txtfile, event, weighted, noise):
    """Cluster one event's sources with HDBSCAN and plot the result.

    Parameters
    ----------
    txtfile : str
        Path to the text file of simulated sources (parsed by read_data).
    event : int
        Index of the event to cluster.
    weighted : bool
        If True, replicate each point proportionally to its energy before
        clustering (a crude form of sample weighting).
    noise : bool
        If False, drop near-zero-energy points before clustering.

    Side effects: prints cluster summaries and saves/shows an eta-phi plot
    under a directory derived from the weighting/noise choices.
    """
    run = '10jets_overlap'
    ##############################################################################
    # Read in sample data
    X_all, e_all = read_data(txtfile)
    X = X_all[event]
    e = e_all[event].ravel()
    ##############################################################################
    # Set variables depending on noise and weighting decisions
    # Filter out noise
    if not noise:
        X = X[e > 1e-7]
        e = e[e > 1e-7]
    # keep the originals for plotting and center-of-mass computations
    X_unweighted = X
    e_unweighted = e
    # Upweight events with high energy
    if weighted:
        # Weight by E/E_noise (actually that would be too high)
        NOISE_LVL = 0.08 # GeV
        X_rep = []
        e_rep = []
        for p, energy in zip(X, e):
            n_replications = int(np.ceil((energy / NOISE_LVL)))
            # if energy > 1:
            # print 'energy = {}, n_replications = {}'.format(energy, n_replications)
            X_rep.extend([p] * (n_replications))
            e_rep.extend([energy] * (n_replications))
        X = np.array(X_rep)
        e = np.array(e_rep)
        # Set other params
        #min_cluster_size=1250
        min_cluster_size=2000
        folder = 'weighted'
    else: # no weighting
        min_cluster_size=5
        folder = 'unweighted'
    # Sort output into correct directory
    if noise:
        folder = os.path.join(folder, 'with_noise', run)
    else:
        folder = os.path.join(folder, 'no_noise', run)
    # Make sure that the output directory exists, otherwise create it
    if not os.path.exists(folder):
        os.makedirs(folder)
    ##############################################################################
    # Compute HDBSCAN
    import hdbscan
    # from hdbscan.dist_metrics import WMinkowskiDistance
    # # MINKOWSKI = DistanceMetric.get_metric('wminkowski', p=2, w=e)
    # MINKOWSKI = WMinkowskiDistance(p=2, w=e)
    # def minkowski(X, Y=None):
    #     try:
    #         return MINKOWSKI.pairwise(X, Y)
    #     except ValueError:
    #         return MINKOWSKI.pairwise(X.reshape(1, -1), Y.reshape(1, -1) if Y is not None else None)
    clusterer = hdbscan.HDBSCAN(
        min_cluster_size=min_cluster_size,
        gen_min_span_tree=True,
        allow_single_cluster=True,
        #min_samples = 10
        )
    clusterer.fit(X)
    ##############################################################################
    # Output cluster info
    labels = clusterer.labels_
    n_clusters_ = len(set(labels))
    if -1 in set(labels): # '-1' is just the group of unclustered objects
        n_real_clusters = n_clusters_ - 1
    else:
        n_real_clusters = n_clusters_
    print('Estimated number of clusters: %d' % (n_real_clusters))
    # Find center of mass energy and position for cluster
    cluster_centers = []
    cluster_energies = []
    for l in set(labels): # !! WARNING !!: these should be computed with unweighted quantities
        #_e = np.unique(e[labels == l])
        #_X = np.unique(np.array([(x, y) for x, y in zip(X[labels == l, 0], X[labels == l, 1])]))
        # de-duplicate replicated points via a set of (eta, phi, energy) tuples
        b = np.array(list(set(zip(X[labels == l, 0], X[labels == l, 1], e[labels == l]))))
        _X = b[:, :2]
        _e = b[:, -1].ravel()
        cme = sum(_e)
        cm = np.sum(_X * _e[:, np.newaxis], axis=0) / cme
        #cme = sum(e[labels == l])
        #cm = np.sum(X[labels == l] * e[labels == l][:, np.newaxis], axis=0) / cme
        # Bug fix: this was a Python-2 print statement while the rest of the
        # function already uses the print() function; now consistent and
        # Python-3 compatible.
        print('Cluster {}: Center = {}, Energy = {}'.format(l, cm, cme))
        cluster_centers.append(cm)
        cluster_energies.append(cme)
    ##############################################################################
    # Produce HDBSCAN-specific plots
    #plot_hdb(clusterer, folder)
    ##############################################################################
    # Plot result
    # -- display event
    plot_eta_phi(X, X_unweighted, e, e_unweighted, n_clusters_, labels, cluster_centers, folder)
# ---------------------------------------------------------------------
def plot_eta_phi(X, X_unweighted, e, e_unweighted, n_clusters_, labels, cluster_centers, folder):
    """Plot the energy-weighted eta-phi histogram of the (unweighted) event
    and overlay the cluster centers, connecting members to their center.

    Saves the figure as <folder>/etaphi.pdf and shows it.
    """
    # fixed eta in [-3, 3], phi in [-3.1, 3.1] binning
    binx = np.linspace(-3.0, 3.0, 61)
    biny = np.linspace(-3.1, 3.1, 63)
    #plt.hist2d(X[:, 0], X[:, 1], weights=e.ravel(), bins=(binx, biny), cmap='rainbow', norm=LogNorm())
    plt.hist2d(X_unweighted[:, 0], X_unweighted[:, 1], weights=e_unweighted.ravel(), bins=(binx, biny), cmap='rainbow', norm=LogNorm())
    cb = plt.colorbar()
    cb.set_label('Energy (GeV)')
    plt.xlabel(r'$\eta$')
    plt.ylabel(r'$\phi$')
    # -- display clustering features
    for k in range(n_clusters_):
        class_members = labels == k
        cluster_center = cluster_centers[k]
        if sum(class_members) == 0:
            #pass
            # empty cluster: mark its center with an 'x'
            plt.plot(cluster_center[0], cluster_center[1], 'kx', mew=2, markersize=14)
        else:
            plt.plot(cluster_center[0], cluster_center[1], 'ko', markerfacecolor='None', #markeredgecolor='k',
                     markersize=14)
            # faint spokes from the center to each member point
            for x in X[class_members]:
                plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], color='black',alpha=0.2)
    plt.savefig(os.path.join(folder, 'etaphi.pdf'))
    plt.show()
# ---------------------------------------------------------------------
def plot_hdb(clusterer, folder):
    """Save HDBSCAN diagnostic plots (spanning tree, condensed tree)
    into *folder*."""
    clusterer.minimum_spanning_tree_.plot(edge_cmap='rainbow',
                                          edge_alpha=0.6,
                                          node_size=40,
                                          edge_linewidth=2)
    plt.savefig(os.path.join(folder, 'minimum_spanning_tree.jpg'))
    plt.clf()
    # clusterer.single_linkage_tree_.plot(cmap='winter')
    # plt.savefig(os.path.join(folder, 'single_linkage_tree.pdf'))
    # plt.clf()
    clusterer.condensed_tree_.plot(select_clusters=True, cmap='winter')
    plt.savefig(os.path.join(folder, 'condensed_tree.pdf'))
    plt.clf()
# ---------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    import argparse
    # Read in arguments and dispatch to hdb(); exit status is hdb's return
    parser = argparse.ArgumentParser()
    parser.add_argument('--txtfile', help="path to the text file with data", default='JetGenerator/clu15_2jets_withNoise.txt')
    parser.add_argument('--event', help="int, number of the event to consider", type=int, default=1)
    parser.add_argument('--weighted', help="boolean that determines whether to weight the points by their energy", default=False, action='store_true')
    parser.add_argument('--noise', help="boolean that determines whether to add noise", default=False, action='store_true')
    args = parser.parse_args()
    sys.exit(hdb(args.txtfile, args.event, args.weighted, args.noise))
| mit |
vbalderdash/LMAsimulation | simulation_ellipse.py | 1 | 12093 | import numpy as np
from scipy.linalg import lstsq
from scipy.optimize import leastsq
from coordinateSystems import GeographicSystem
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
def travel_time(X, X_ctr, c, t0=0.0, get_r=False):
    """ Units are meters, seconds.
    X is a (N,3) vector of source locations, and X_ctr is (M,3) receiver
    locations.
    t0 may be used to specify a fixed amount of additonal time to be
    added to the travel time.
    If get_r is True, return the travel time *and* calculated range.
    Otherwise just return the travel time.  Times/ranges are (M, N)
    (one row per receiver).
    """
    ranges = (np.sum((X[:, np.newaxis] - X_ctr)**2, axis=2)**0.5).T
    time = t0+ranges/c
    # Bug fix: the original tested `get_r == True` / `get_r == False`
    # separately and silently returned None for any other truthy value
    # (e.g. get_r='yes'); use ordinary truthiness instead.
    if get_r:
        return time, ranges
    return time
def received_power(power_emit, r, wavelength=4.762, recv_gain=1.0):
    """ Calculate the free space loss. Defaults are for no receive gain and
    a 63 MHz wave.
    Units: watts, meters
    Applies the Friis free-space factor (wavelength / (4*pi*r))**2 to the
    emitted power, scaled by the receiver gain.
    """
    denominator = 4.0 * np.pi * r
    free_space_loss = (wavelength / denominator) ** 2
    return recv_gain * power_emit * free_space_loss
def precalc_station_terms(stations_ECEF):
    """ Given a (N_stn, 3) array of station locations,
    return dxvec and drsq which are (N, N, 3) and (N, N),
    respectively, where the first dimension is the solution
    for a different master station.

    dxvec[i, j] = stations[j] - stations[i];
    drsq[i, j] = |stations[j]|^2 - |stations[i]|^2.
    """
    pairwise_dx = stations_ECEF[np.newaxis, :, :] - stations_ECEF[:, np.newaxis, :]
    sq_ranges = np.sum(stations_ECEF ** 2, axis=1)
    pairwise_drsq = sq_ranges[np.newaxis, :] - sq_ranges[:, np.newaxis]
    return pairwise_dx, pairwise_drsq
def linear_first_guess(t_i, dxvec, drsq, c=3.0e8):
    """ Given a vector of (N_stn) arrival times relative to the master
    station, calculate a linearized least-squares first-guess solution
    for the source location.
    Return f=(x, y, z, w), where f[:3] is the source position and the
    fourth component absorbs the master-station range term.
    """
    rhs = 0.5 * (drsq - (c ** 2) * (t_i ** 2))
    design = np.column_stack((dxvec, -c * t_i))
    solution, _, _, _ = lstsq(design, rhs)
    return solution
def predict(p, stations_ECEF2, c=3.0e8):
    """ Predict arrival times for a particular fit of x,y,z,t source
    locations.  p are the retrieved parameters: p[:3] is the position
    (m) and p[3] the source time scaled by c (m); the result is in
    seconds, one entry per station.
    """
    offsets = p[:3] - stations_ECEF2
    ranges = np.sqrt(np.sum(offsets * offsets, axis=1))
    return (p[3] + ranges) / c
def residuals(p, t_i, stations_ECEF2):
    """Timing residuals for leastsq: observed minus modelled arrival times."""
    modelled = predict(p, stations_ECEF2)
    return t_i - modelled
def dfunc(p, t_i, stations_ECEF2, c=3.0e8):
    # Analytic Jacobian of `residuals` for scipy.optimize.leastsq, supplied
    # in column-derivative form (used with col_deriv=1): row k holds
    # d(residual_i)/d(p_k).  The spatial rows are -(p - s_i)/(c*range_i) and
    # the last row is -1/c for the time parameter.  t_i is accepted only to
    # match the residuals signature; the derivative does not depend on it.
    return -np.vstack(((p[:3] - stations_ECEF2).T / (c *
                      (np.sum((p[:3] - stations_ECEF2) *
                      (p[:3] - stations_ECEF2), axis=1))**0.5),
                      np.array([1./c]*np.shape(stations_ECEF2)[0])))
def gen_retrieval_math(i, selection, t_all, t_mins, dxvec, drsq, center_ECEF,
                       stations_ECEF, dt_rms, min_stations=5,
                       max_z_guess=25.0e3):
    """ t_all is a N_stations x N_points masked array of arrival times at
    each station.
    t_min is an N-point array of the index of the first unmasked station
    to receive a signal
    center_ECEF for the altitude check
    This streamlines the generator function, which emits a stream of
    nonlinear least-squares solutions.
    Returns plsq = (x, y, z, c*t, reduced-chi-square) for source i, where
    the position is ECEF in meters and plsq[3] is the source time scaled
    by c (see `predict`).
    """
    m = t_mins[i]
    stations_ECEF2=stations_ECEF[selection]
    # Make a linear first guess
    p0 = linear_first_guess(np.array(t_all[:,i][selection]-t_all[m,i]),
                            dxvec[m][selection],
                            drsq[m][selection])
    t_i =t_all[:,i][selection]-t_all[m,i]
    # Checking altitude in lat/lon/alt from local coordinates
    latlon = np.array(GeographicSystem().fromECEF(p0[0], p0[1],p0[2]))
    # NOTE(review): the clamp hard-codes 25000 m / 7000 m even though
    # max_z_guess is a parameter -- confirm whether it should be used here.
    if (latlon[2]<0) | (latlon[2]>25000):
        latlon[2] = 7000
        new = GeographicSystem().toECEF(latlon[0], latlon[1], latlon[2])
        p0[:3]=np.array(new)
    plsq = np.array([np.nan]*5)
    # refine the linear guess with Levenberg-Marquardt (analytic Jacobian)
    plsq[:4], cov, infodict, mesg,ier = leastsq(residuals, p0,
                                        args=(t_i, stations_ECEF2),
                                        Dfun=dfunc,col_deriv=1,full_output=True)
    # reduced chi-square: sum of squared residuals over dt_rms^2 * dof
    plsq[4] = np.sum(infodict['fvec']*infodict['fvec'])/(
              dt_rms*dt_rms*(float(np.shape(stations_ECEF2)[0]-4)))
    return plsq
def gen_retrieval(t_all, t_mins, dxvec, drsq, center_ECEF, stations_ECEF,
                  dt_rms, min_stations=5, max_z_guess=25.0e3):
    """ t_all is a N_stations x N_points masked array of arrival times at
    each station.
    t_min is an N-point array of the index of the first unmasked station
    to receive a signal
    center_ECEF for the altitude check
    This is a generator function, which emits a stream of nonlinear
    least-squares solutions.
    Sources seen by fewer than min_stations stations yield an all-NaN
    5-vector instead of a solution.
    """
    for i in range(t_all.shape[1]):
        selection=~np.ma.getmask(t_all[:,i])
        # getmask returns a scalar False when nothing is masked; normalize
        # to a full boolean vector in that case
        if np.all(selection == True):
            selection = np.array([True]*len(t_all[:,i]))
            yield gen_retrieval_math(i, selection, t_all, t_mins, dxvec, drsq,
                                     center_ECEF, stations_ECEF, dt_rms,
                                     min_stations, max_z_guess=25.0e3)
        elif np.sum(selection)>=min_stations:
            yield gen_retrieval_math(i, selection, t_all, t_mins, dxvec, drsq,
                                     center_ECEF, stations_ECEF, dt_rms,
                                     min_stations, max_z_guess=25.0e3)
        else:
            yield np.array([np.nan]*5)
def gen_retrieval_full(t_all, t_mins, dxvec, drsq, center_ECEF, stations_ECEF,
                       dt_rms, c0, min_stations=5, max_z_guess=25.0e3):
    """ t_all is a N_stations x N_points masked array of arrival times at
    each station.
    t_min is an N-point array of the index of the first unmasked station
    to receive a signal
    center_ECEF for the altitude check
    This is a generator function, which emits a stream of nonlinear
    least-squares solutions.
    Timing comes out of least-squares function as t*c from the initial
    station
    Each yielded 7-vector is (x, y, z, c*t, reduced-chi2, absolute source
    time in s, number of contributing stations); under-constrained sources
    yield NaNs except for the station count.
    """
    for i in range(t_all.shape[1]):
        selection=~np.ma.getmask(t_all[:,i])
        plsq = np.array([np.nan]*7)
        # getmask returns a scalar False when nothing is masked; normalize
        # to a full boolean vector in that case
        if np.all(selection == True):
            selection = np.array([True]*len(t_all[:,i]))
            plsq[:5] = gen_retrieval_math(i, selection, t_all, t_mins, dxvec,
                                    drsq, center_ECEF, stations_ECEF, dt_rms,
                                    min_stations, max_z_guess=25.0e3)
            # convert the c-scaled relative time back to absolute seconds
            plsq[5] = plsq[3]/c0 + t_all[t_mins[i],i]
            plsq[6] = np.shape(stations_ECEF[selection])[0]
            yield plsq
        elif np.sum(selection)>=min_stations:
            plsq[:5] = gen_retrieval_math(i, selection, t_all, t_mins, dxvec,
                                    drsq, center_ECEF, stations_ECEF, dt_rms,
                                    min_stations, max_z_guess=25.0e3)
            plsq[5] = plsq[3]/c0 + t_all[t_mins[i],i]
            plsq[6] = np.shape(stations_ECEF[selection])[0]
            yield plsq
        else:
            plsq[6] = np.shape(stations_ECEF[selection])[0]
            yield plsq
def eigsorted(cov):
    """Eigen-decomposition of a symmetric matrix, sorted by descending
    eigenvalue.  Returns (values, vectors) with vectors as columns."""
    eigvals, eigvecs = np.linalg.eigh(cov)
    descending = np.argsort(eigvals)[::-1]
    return eigvals[descending], eigvecs[:, descending]
def array_from_generator2(generator, rows):
    """Creates a numpy array from a specified number
    of values from the generator provided.  If the generator is
    exhausted early, the array holds only the values produced."""
    collected = []
    while len(collected) < rows:
        try:
            collected.append(next(generator))
        except StopIteration:
            break
    return np.array(collected)
def black_boxtesting(x,y,z,n,
                     stations_local,ordered_threshs,stations_ecef,center_ecef,
                     tanps,
                     c0,dt_rms,tanp,projl,chi2_filter,min_stations=5,ntsd=3):
    """ This funtion incorporates most of the Monte Carlo functions and calls
        into one big block of code.
        x,y,z are the source location in the local tangent plane (m)
        n is the number of iterations
        stations_local is the the (N-stations, 3) array of station locations
        in the local tangent plane
        ordered_threshs is the N-station array of thresholds in the same order
        as the station arrays (in dBm).
        stations_ecef is (N,3) array in ECEF coordinates
        center_ecef is just the center location of the network, just easier
        to pass into the fuction separately to save some calculation
        c0 is th speed of light
        dt_rms is the standard deviation of the timing error (Gaussian, in s)
        tanp is the tangent plane object
        projl is the map projection object
        chi2_filter is the maximum allowed reduced chi2 filter for the
        calculation (use at most 5)
        min_stations is the minimum number of stations required to receive
        a source. This must be at least 5, can be higher to filter out more
        poor solutions
        Returned are the w,h,theta values of the covariance ellipses in one
        array and the standard deviation of the altitude solutions separately.
        Covariance ellipses are by default set at 3 standard deviations.

        NOTE(review): this function draws from np.random without seeding,
        so results are nondeterministic run to run.
    """
    # n identical copies of the true source location, one per iteration.
    points = np.array([np.zeros(n)+x, np.zeros(n)+y, np.zeros(n)+z]).T
    powers = np.empty(n)
    # For the theoretical distribution: draw each source power as the max
    # of 2000 inverse-uniform samples.
    for i in range(len(powers)):
        powers[i] = np.max(1./np.random.uniform(0,1000,2000))
    # Calculate distance and power retrieved at each station and mask
    # the stations which have higher thresholds than the retrieved power
    points_f_ecef = (tanp.fromLocal(points.T)).T
    dt, ran = travel_time(points, stations_local, c0, get_r=True)
    pwr = received_power(powers, ran)
    # Mask stations whose threshold (dBm) exceeds the received power.
    masking = 10.*np.log10(pwr/1e-3) < ordered_threshs[:,np.newaxis]
    masking2 = np.empty_like(masking)
    # Also mask stations for which the source is below their local horizon
    # (negative z in that station's own tangent plane).
    for i in range(len(stations_ecef[:,0])):
        masking2[i] = tanps[i].toLocal(points_f_ecef.T)[2]<0
    masking = masking | masking2
    pwr = np.ma.masked_where(masking, pwr)
    dt = np.ma.masked_where(masking, dt)
    ran = np.ma.masked_where(masking, ran)
    # Add error to the retreived times
    dt_e = dt + np.random.normal(scale=dt_rms, size=np.shape(dt))
    # Index of the first (earliest) receiving station for each point.
    dt_mins = np.argmin(dt_e, axis=0)
    # Precalculate some terms in ecef (fastest calculation)
    points_f_ecef = (tanp.fromLocal(points.T)).T
    full_dxvec, full_drsq = precalc_station_terms(stations_ecef)
    # Run the retrieved locations calculation
    # gen_retrieval returns a tuple of four positions, x,y,z,t.
    # NOTE(review): `dtype` below is only used by the commented-out
    # np.fromiter path and is otherwise dead.
    dtype=[('x', float), ('y', float), ('z', float), ('t', float),
           ('chi2', float)]
    # Prime the generator function - pauses at the first yield statement.
    point_gen = gen_retrieval(dt_e, dt_mins, full_dxvec, full_drsq,
                              center_ecef, stations_ecef, dt_rms,
                              min_stations)
    # Suck up the values produced by the generator, produce named array.
    retrieved_locations = array_from_generator2(point_gen,rows=n)
    # retrieved_locations = np.fromiter(point_gen, dtype=dtype)
    # Keep (x, y, z, chi2); the t*c element (d) is dropped here.
    retrieved_locations = np.array([(a,b,c,e) for (a,b,c,d,e) in
                                    retrieved_locations])
    chi2 = retrieved_locations[:,3]
    retrieved_locations = retrieved_locations[:,:3]
    retrieved_locations = np.ma.masked_invalid(retrieved_locations)
    #Convert back to local tangent plane
    # NOTE(review): `soluts` is computed but never used afterwards.
    soluts = tanp.toLocal(retrieved_locations.T)
    proj_soluts = projl.fromECEF(retrieved_locations[:,0],
                                 retrieved_locations[:,1],
                                 retrieved_locations[:,2])
    # Keep only solutions above the surface (positive altitude).
    good = proj_soluts[2] > 0
    proj_soluts = (proj_soluts[0][good],proj_soluts[1][good],
                   proj_soluts[2][good])
    proj_soluts = np.ma.masked_invalid(proj_soluts)
    # Horizontal covariance of the chi2-filtered solutions -> error ellipse.
    cov = np.cov(proj_soluts[0][chi2[good]<chi2_filter], proj_soluts[1][chi2[good]<chi2_filter])
    vals, vecs = eigsorted(cov)
    # Ellipse orientation from the principal eigenvector.
    theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
    # Full ellipse axes at ntsd standard deviations.
    w, h = 2 * ntsd * np.sqrt(vals)
    return np.array([w,h,theta]),np.std(proj_soluts[2][chi2[good]<chi2_filter]
) | mit |
grundgruen/zipline | zipline/utils/data_source_tables_gen.py | 40 | 7380 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import getopt
import traceback
import numpy as np
import pandas as pd
import datetime
import logging
import tables
import gzip
import glob
import os
import random
import csv
import time
from six import print_
FORMAT = "%(asctime)-15s -8s %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
class Usage(Exception):
    """Raised for command-line usage errors; carries the message to print."""
    def __init__(self, msg):
        # Stored (rather than passed to Exception.__init__) so main() can
        # print err.msg directly.
        self.msg = msg
# PyTables row schema for one minute-bar record: `pos` fixes the on-disk
# column order and `dflt` supplies the value used when a field is missing.
OHLCTableDescription = {'sid': tables.StringCol(14, pos=2),
                        'dt': tables.Int64Col(pos=1),
                        'open': tables.Float64Col(dflt=np.NaN, pos=3),
                        'high': tables.Float64Col(dflt=np.NaN, pos=4),
                        'low': tables.Float64Col(dflt=np.NaN, pos=5),
                        'close': tables.Float64Col(dflt=np.NaN, pos=6),
                        "volume": tables.Int64Col(dflt=0, pos=7)}
def process_line(line):
    """Convert one CSV dict row into a (dt, sid, open, high, low, close,
    volume) tuple.

    The timestamp string is converted to the integer representation of
    its numpy datetime64 value; prices become floats and volume an int.
    """
    return (
        np.datetime64(line["dt"]).astype(np.int64),
        line["sid"],
        float(line["open"]),
        float(line["high"]),
        float(line["low"]),
        float(line["close"]),
        int(line["volume"]),
    )
def parse_csv(csv_reader):
    """Group CSV rows into per-day record arrays.

    Yields (date_string, recarray) pairs each time the date (YYYYMMDD,
    taken from the first 10 chars of the "dt" field) changes.

    NOTE(review): when a new date is encountered, the accumulated rows
    belong to the *previous* date but are yielded labeled with the new
    date, and the final day's rows are never yielded at all - confirm
    whether the caller (merge_all_files_into_pytables) compensates.
    """
    previous_date = None
    data = []
    # Record dtype matching OHLCTableDescription's columns.
    dtype = [('dt', 'int64'), ('sid', '|S14'), ('open', float),
             ('high', float), ('low', float), ('close', float),
             ('volume', int)]
    for line in csv_reader:
        row = process_line(line)
        # "2012-01-01T..." -> "20120101"
        current_date = line["dt"][:10].replace("-", "")
        if previous_date and previous_date != current_date:
            # Flush the batch accumulated so far as a record array.
            rows = np.array(data, dtype=dtype).view(np.recarray)
            yield current_date, rows
            data = []
        data.append(row)
        previous_date = current_date
def merge_all_files_into_pytables(file_dir, file_out):
    """
    process each file into pytables

    Reads every gzipped CSV in *file_dir*, validates its header, and
    writes one compressed PyTables table per day under /TD in *file_out*.
    Files with an unexpected header abort the whole merge.

    NOTE(review): tables.openFile/createTable are the legacy PyTables 2.x
    API (renamed open_file/create_table in 3.x) - confirm the pinned
    PyTables version.
    """
    start = None
    start = datetime.datetime.now()
    out_h5 = tables.openFile(file_out,
                             mode="w",
                             title="bars",
                             filters=tables.Filters(complevel=9,
                                                    complib='zlib'))
    table = None
    for file_in in glob.glob(file_dir + "/*.gz"):
        gzip_file = gzip.open(file_in)
        expected_header = ["dt", "sid", "open", "high", "low", "close",
                           "volume"]
        csv_reader = csv.DictReader(gzip_file)
        header = csv_reader.fieldnames
        if header != expected_header:
            # Abort (not skip) on a malformed file.
            logging.warn("expected header %s\n" % (expected_header))
            logging.warn("header_found %s" % (header))
            return
        for current_date, rows in parse_csv(csv_reader):
            # One table per calendar day, e.g. /TD/date_20120101.
            table = out_h5.createTable("/TD", "date_" + current_date,
                                       OHLCTableDescription,
                                       expectedrows=len(rows),
                                       createparents=True)
            table.append(rows)
            table.flush()
        if table is not None:
            table.flush()
    end = datetime.datetime.now()
    diff = (end - start).seconds
    logging.debug("finished it took %d." % (diff))
def create_fake_csv(file_in):
    """Write a gzipped CSV of synthetic minute bars for a single fake sid.

    Bars run from 09:30 two days ago until 16:00 today, one minute apart,
    jumping to the next morning's 09:30 once the hour passes 16. Prices
    follow a small random walk around 10.0. No header row is written.
    """
    column_names = ["dt", "sid", "open", "high", "low", "close", "volume"]
    out_file = gzip.open(file_in, "w")
    writer = csv.DictWriter(out_file, fieldnames=column_names)
    cursor = pd.Timestamp(datetime.date.today() - datetime.timedelta(days=2))
    cursor = cursor.replace(hour=9).replace(minute=30)
    stop = pd.Timestamp(datetime.date.today()).replace(hour=16)
    price = 10.0
    while cursor < stop:
        # Random-walk step of at most a few hundredths.
        price += random.randint(-20, 100) / 10000.0
        writer.writerow({
            "dt": cursor,
            "sid": "test",
            "close": price,
            "open": price - 0.01,
            "low": price - 0.02,
            "high": price + 0.02,
            "volume": random.randint(10, 1000) * 10,
        })
        cursor += datetime.timedelta(minutes=1)
        if cursor.hour > 16:
            # Roll over to the next day's session open.
            cursor += datetime.timedelta(days=1)
            cursor = cursor.replace(hour=9).replace(minute=30)
    out_file.close()
def main(argv=None):
    """
    This script cleans minute bars into pytables file
    data_source_tables_gen.py
    [--tz_in] sets time zone of data only reasonably fast way to use
    time.tzset()
    [--dir_in] iterates through directory provided of csv files in gzip form
    in form:
    dt, sid, open, high, low, close, volume
    2012-01-01T12:30:30,1234HT,1, 2,3,4.0
    [--fake_csv] creates a fake sample csv to iterate through
    [--file_out] determines output file
    """
    # NOTE: the docstring above is printed verbatim as the --help text,
    # so its wording is user-visible behavior.
    if argv is None:
        argv = sys.argv
    try:
        dir_in = None
        file_out = "./all.h5"
        fake_csv = None
        try:
            # NOTE(review): the short-option string "hdft" declares -f and
            # -t but nothing below handles them, and it omits -o even
            # though -o is checked for --file_out below - confirm intent.
            opts, args = getopt.getopt(argv[1:], "hdft",
                                       ["help",
                                        "dir_in=",
                                        "debug",
                                        "tz_in=",
                                        "fake_csv=",
                                        "file_out="])
        except getopt.error as msg:
            raise Usage(msg)
        for opt, value in opts:
            if opt in ("--help", "-h"):
                print_(main.__doc__)
            # NOTE(review): "-d" appears in both of the next two tests, so
            # a bare -d enables debug logging AND sets dir_in to the empty
            # string - presumably only --debug was intended; verify.
            if opt in ("-d", "--debug"):
                logging.basicConfig(format=FORMAT,
                                    level=logging.DEBUG)
            if opt in ("-d", "--dir_in"):
                dir_in = value
            if opt in ("-o", "--file_out"):
                file_out = value
            if opt in ("--fake_csv"):
                fake_csv = value
            if opt in ("--tz_in"):
                # tzset() applies the TZ environment variable process-wide.
                os.environ['TZ'] = value
                time.tzset()
        try:
            if dir_in:
                merge_all_files_into_pytables(dir_in, file_out)
            if fake_csv:
                create_fake_csv(fake_csv)
        except Exception:
            # Report any failure as a formatted traceback instead of
            # letting it propagate.
            error = "An unhandled error occured in the"
            error += "data_source_tables_gen.py script."
            error += "\n\nTraceback:\n"
            error += '-' * 70 + "\n"
            error += "".join(traceback.format_tb(sys.exc_info()[2]))
            error += repr(sys.exc_info()[1]) + "\n"
            error += str(sys.exc_info()[1]) + "\n"
            error += '-' * 70 + "\n"
            print_(error)
    except Usage as err:
        print_(err.msg)
        print_("for help use --help")
        return 2
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
allthroughthenight/aces | python/drivers/ext_Hs_analysis.py | 1 | 22155 | import sys
import math
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../functions')
from base_driver import BaseDriver
from helper_objects import BaseField
import USER_INPUT
from ERRSTP import ERRSTP
from ERRWAVBRK1 import ERRWAVBRK1
from WAVELEN import WAVELEN
from EXPORTER import EXPORTER
## ACES Update to MATLAB
#-------------------------------------------------------------
# Driver for Extremal Significant Wave Height Analysis (page 1-3 of ACES
# User's Guide). Provide significant wave height estimates for various
# return periods.
# Updated by: Mary Anderson, USACE-CHL-Coastal Processes Branch
# Date Created: May 13, 2011
# Date Verified: June 18, 2012
# Requires the following functions:
# ERRWAVBRK1
# MAIN VARIABLE LIST:
# INPUT
# Nt: estimated total number of events from the population during the
# length of the record
# K: length of the record in years
# d: water depth
# Hs: significant wave heights from long-term data source
# OUTPUT
# Hsr: significant wave height with return period Tr
# sigr: standard deviation of significant wave height
# pe: probability that a height with given return period will be equaled
# or exceeded during some time period
# OTHERS
# yact: probability as estimated by the plotting forumula
# yest: probability as estimated by the distribution
# signr: normalized standard deviation of significant wave height with
# return period Tr
#-------------------------------------------------------------
class ExtHsAnalysis(BaseDriver):
    def __init__(self, Nt = None, K = None, d = None,\
        Hs = None, option = None):
        """Create the driver; any argument supplied here becomes a fixed
        default and will not be prompted for interactively.

        Nt: estimated total number of events in the record
        K: record length in years
        d: water depth
        Hs: list of significant wave heights
        option: confidence-interval choice (1-5)
        """
        # The exporter must exist before super().__init__ runs, because
        # the base constructor drives the whole input/calculation/output
        # workflow (which writes through self.exporter).
        self.exporter = EXPORTER("output/exportExtHsAnalysis")
        self.isSingleCase = True
        # Stash only the arguments actually provided; downstream code uses
        # hasattr() on these defaultValue* attributes to decide what to
        # prompt for.
        if Nt != None:
            self.defaultValueNt = Nt
        if K != None:
            self.defaultValueK = K
        if d != None:
            self.defaultValue_d = d
        if Hs != None:
            self.defaultValueHs = Hs
        if option != None:
            self.defaultValue_option = option
        # Runs userInput/performCalculations/output via the base class.
        super(ExtHsAnalysis, self).__init__()
        self.exporter.close()
    # end __init__
def userInput(self):
super(ExtHsAnalysis, self).userInput()
if not hasattr(self, "defaultValueHs"):
hsCount = USER_INPUT.DATA_VALUE(\
"the number of significant wave heights", 1, 200)
hsCount = int(hsCount)
self.Hs = []
for i in range(hsCount):
self.Hs.append(USER_INPUT.DATA_VALUE(\
"significant wave height [%s] #%d" %\
(self.labelUnitDist, (i + 1)),\
0.0, 100.0))
else:
self.Hs = self.defaultValueHs
if not hasattr(self, "defaultValue_option"):
self.option = USER_INPUT.FINITE_CHOICE(\
"Confidence intervals:\n[1] 80%\n[2] 85%\n[3] 90%\n[4] 95%\n[5] 99%\nSelect option: ",\
["1", "2", "3", "4", "5"])
self.option = int(self.option)
else:
self.option = self.defaultValue_option
# end userInput
def defineInputDataList(self):
self.inputList = []
if not hasattr(self, "defaultValueNt"):
self.inputList.append(BaseField(\
"Nt: estimated total number of events", 0.0, 10000.0))
if not hasattr(self, "defaultValueK"):
self.inputList.append(BaseField(\
"K: length of the record in years", 0.0, 999.9))
if not hasattr(self, "defaultValue_d"):
self.inputList.append(BaseField(\
"d: water depth [%s]" % (self.labelUnitDist), 0.0, 1000.0))
# end defineInputDataList
    def fileOutputRequestInit(self):
        """Request the output file using this driver's default filename."""
        self.fileOutputRequestMain(defaultFilename = "ext_Hs_analysis")
def getCalcValues(self, caseInputList):
currIndex = 0
if hasattr(self, "defaultValueNt"):
Nt = self.defaultValueNt
else:
Nt = caseInputList[currIndex]
currIndex = currIndex + 1
if hasattr(self, "defaultValueK"):
K = self.defaultValueK
else:
K = caseInputList[currIndex]
currIndex = currIndex + 1
if hasattr(self, "defaultValue_d"):
d = self.defaultValue_d
else:
d = caseInputList[currIndex]
return Nt, K, d
# end getCalcValues
    def performCalculations(self, caseInputList, caseIndex = 0):
        """Fit the Hs sample to FT-I and four Weibull distributions,
        compute return-period heights with confidence bounds, print the
        summary tables, and stash results for file output and plotting.

        caseInputList supplies any of Nt/K/d not fixed at construction.

        NOTE(review): self.Hs is divided by 0.3048 in place (presumably a
        meters-to-feet conversion), so invoking this method twice would
        convert the series twice - confirm single-case use only.
        """
        Nt, K, d = self.getCalcValues(caseInputList)
        N = len(self.Hs)
        # lambda: mean number of events per year; nu: fraction of the
        # population events present in the sample.
        lambdaVal = Nt / K
        nu = N / Nt
        dataDict = {"Nt": Nt, "K": K, "d": d, "N": N,\
            "nu": nu, "lambdaVal": lambdaVal}
        self.Hs = [i / 0.3048 for i in self.Hs]
        d = d / 0.3048
        # Depth-limited breaking height; any broken input wave aborts.
        Hb = ERRWAVBRK1(d, 0.78)
        for j in self.Hs:
            if not (j < Hb):
                self.errorMsg = "Error: Input wave broken (Hb = %6.2f %s)" %\
                    (Hb, self.labelUnitDist)
                print(self.errorMsg)
                self.fileOutputWriteMain(dataDict, caseIndex)
                return
        # Return periods (years) evaluated for each distribution.
        ret = [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9,\
            2.0, 5.0, 10.0, 25.0, 50.0, 100.0]
        #Coefficients
        # k_W: Weibull shape parameters; a1/a2/kappa/c/epsi: empirical
        # confidence-interval coefficients per distribution (FT-I first).
        k_W = [0.75, 1.00, 1.40, 2.00]
        a1 = [0.64, 1.65, 1.92, 2.05, 2.24]
        a2 = [9.0, 11.4, 11.4, 11.4, 11.4]
        kappa = [0.93, -0.63, 0.00, 0.69, 1.34]
        c = [0.0, 0.0, 0.3, 0.4, 0.5]
        epsi = [1.33, 1.15, 0.90, 0.72, 0.54]
        pret = [1.0, 2.0, 5.0, 10.0, 25.0, 50.0, 100.0]
        plen = pret
        # Rank heights in descending order for the plotting formulas.
        self.Hs.sort()
        self.Hs.reverse()
        # yact: plotting-position probability; ym: reduced variate.
        yact = [[0.0 for j in range(5)] for i in range(N)]
        ym = [[0.0 for j in range(5)] for i in range(N)]
        for j in range(N):
            yact[j][0] = 1.0 - ((j + 1) - 0.44)/(Nt + 0.12) #FT-I
            ym[j][0] = -math.log(-math.log(yact[j][0]))
            for m in range(1, 5):
                k = k_W[m - 1] #Weibull
                yact[j][m] = 1.0 - ((j + 1) - 0.2 - 0.27/math.sqrt(k))/\
                    (Nt + 0.2 + 0.23/math.sqrt(k))
                ym[j][m] = (-math.log(1.0 - yact[j][m]))**(1.0/k)
        # end for loop
        # Least-squares sums for the linear fit Hs = alpha*ym + beta.
        Sx = sum(self.Hs)
        Sy = [sum([yact[j][m] for j in range(N)]) for m in range(5)]
        Sxx = sum([i**2 for i in self.Hs])
        Slly = [sum([ym[j][m] for j in range(N)]) for m in range(5)]
        Syy = [sum([ym[j][m]**2 for j in range(N)]) for m in range(5)]
        Sxy = [[0.0 for j in range(5)] for i in range(N)]
        for j in range(N):
            for m in range(5):
                Sxy[j][m] = self.Hs[j]*ym[j][m]
        Sxy = [sum([Sxy[j][m] for j in range(N)]) for m in range(5)]
        alpha = []
        beta = []
        for m in range(5):
            alpha.append((N*Sxy[m] - Sx*Slly[m])/(N*Syy[m] - Slly[m]**2))
            beta.append((1.0/N)*(Sx - alpha[m]*Slly[m]))
        # Probability of each observed height under each fitted CDF.
        yest = []
        for j in range(N):
            yest.append([])
            yest[j].append(\
                math.exp(-math.exp(-(self.Hs[j] - beta[0])/alpha[0]))) #FT-I
            for m in range(1, 5):
                k = k_W[m - 1] #Weibull
                if (self.Hs[j] - beta[m])/alpha[m] >= 0.0:
                    yest[j].append(\
                        1.0 - math.exp(-((self.Hs[j] - beta[m])/alpha[m])**k))
                else:
                    yest[j].append(0.0)
        st = []
        for j in range(N):
            st.append([])
            for m in range(5):
                st[j].append((yact[j][m] - yest[j][m])**2)
        # sumresid = sum(st)/0.3048
        sumresid = [sum([st[j][m] for j in range(N)]) for m in range(5)]
        sumresid = [i/0.3048 for i in sumresid] #sum square of residuals
        rxy = []
        for m in range(5):
            numer = N*Sxy[m] - Sx*Slly[m]
            term1d = N*Sxx - Sx**2
            term2d = N*Syy[m] - Slly[m]**2
            rxy.append(numer/(math.sqrt(term1d*term2d))) #correlation coefficient
        # Return-period heights Hsr for every ret value and distribution.
        yr = [[0.0 for m in range(5)] for j in range(len(ret))]
        Hsr = [[0.0 for m in range(5)] for j in range(len(ret))]
        for j in range(len(ret)):
            prob1 = 1.0 - 1.0/(lambdaVal*ret[j])
            if prob1 <= 0.0:
                prob1 = 1.0*10**(-7)
            yr[j][0] = -math.log(-math.log(prob1)) #FT-I
            Hsr[j][0] = alpha[0]*yr[j][0] + beta[0]
            for m in range(1, 5):
                prob2 = lambdaVal*ret[j]
                if prob2 <= 0.0:
                    prob2 = 1.0*10**(-7)
                k = k_W[m - 1] #Weibull
                yr[j][m] = math.log(prob2)**(1.0/k)
                Hsr[j][m] = alpha[m]*yr[j][m] + beta[m]
        # end for loop
        # Return period implied by each observation (for plotting).
        rtp = []
        for j in range(N):
            rtp.append([])
            rtp[j].append(\
                1.0/((1.0 - math.exp(-math.exp(-ym[j][0])))*lambdaVal)) #FT-I
            for m in range(1, 5):
                k = k_W[m - 1] #Weibull
                rtp[j].append(math.exp(ym[j][m]**k)/lambdaVal)
        # end for loop
        standev = np.std(self.Hs, ddof=1) #standard deviation
        #Calculate confidence intervals
        sigr = [[0.0 for m in range(5)] for j in range(len(ret))]
        for m in range(5):
            coeff = a1[m]*math.exp(a2[m]*N**(-1.3) + kappa[m]*math.sqrt(-math.log(nu)))
            for j in range(len(ret)):
                signr = (1.0/math.sqrt(N))*(1.0 + coeff*(yr[j][m] - c[m] + epsi[m]*math.log(nu))**2)**(0.5)
                sigr[j][m] = signr*standev
        # end for loop
        # Scale sigr into confidence bounds by the normal-quantile factor
        # matching the option chosen by the user.
        if self.option == 1: #80%
            bounds = [[j*1.28 for j in i] for i in sigr]
            conf = 80
        elif self.option == 2: #85%
            bounds = [[j*1.44 for j in i] for i in sigr]
            conf = 85
        elif self.option == 3: #90%
            bounds = [[j*1.65 for j in i] for i in sigr]
            conf = 90
        elif self.option == 4: #95%
            bounds = [[j*1.96 for j in i] for i in sigr]
            conf = 95
        elif self.option == 5: #99%
            bounds = [[j*2.58 for j in i] for i in sigr]
            conf = 99
        lowbound = []
        highbound = []
        for j in range(len(Hsr)):
            lowbound.append([])
            highbound.append([])
            for m in range(5):
                lowbound[j].append(Hsr[j][m] - bounds[j][m])
                highbound[j].append(Hsr[j][m] + bounds[j][m])
        #Calculated percent chance for significant wave height
        #equaling or exceeding the return period
        pe = [[0.0 for j in range(7)] for i in range(7)]
        for i in range(7):
            for j in range(7):
                pe[j][i] = 100.0*(1.0 - (1.0 - 1.0/pret[j])**plen[i])
        # Fitted heights at the observed reduced variates (for reports).
        xxr = []
        for i in range(N):
            xxr.append([])
            for m in range(5):
                xxr[i].append(ym[i][m]*alpha[m] + beta[m])
        # Header row/column labels for the percent-chance table; 999 marks
        # the top-left corner cell.
        printpe = [[j for j in i] for i in pe]
        printside = [2, 5, 10, 25, 50, 100]
        printpe[0][0] = 999
        for i in range(1, len(printpe)):
            printpe[i][0] = printside[i - 1]
        for j in range(1, len(printpe[0])):
            printpe[0][j] = printside[j - 1]
        indexList = [ret.index(2.0), ret.index(5.0), ret.index(10.0),\
            ret.index(25.0), ret.index(50.0), ret.index(100.0)]
        print("N = %-i NU = %-3.2f NT = %-i K = %-3.2f lambda = %-3.2f\n" %\
            (N, nu, Nt, K, lambdaVal))
        print("\t\tFT-I\tW (k=0.75) W (k=1.00) W (k=1.40) W (k=2.00)")
        print("Corr. coeff.\t%-12.4f%-12.4f%-12.4f%-12.4f%-12.4f" %\
            (rxy[0], rxy[1], rxy[2], rxy[3], rxy[4]))
        print("Sq. of Resid.\t%-12.4f%-12.4f%-12.4f%-12.4f%-12.4f\n" %\
            (sumresid[0], sumresid[1], sumresid[2], sumresid[3], sumresid[4]))
        print("Return period\tHs [%s] Hs [%s] Hs [%s] Hs [%s] Hs [%s]" %\
            (self.labelUnitDist, self.labelUnitDist, self.labelUnitDist, self.labelUnitDist, self.labelUnitDist))
        for m in range(6):
            print("%-i\t\t%-10.2f%-10.2f%-10.2f%-10.2f%-10.2f" %\
                (ret[indexList[m]], Hsr[indexList[m]][0], Hsr[indexList[m]][1],\
                Hsr[indexList[m]][2], Hsr[indexList[m]][3], Hsr[indexList[m]][4]))
        # Best fit = distribution with the largest correlation coefficient.
        val = max(rxy)
        C = rxy.index(val)
        if C == 0:
            print("\nBest fit distribution function: Fisher-Tippett Type I\n")
        elif C == 1:
            print("\nBest fit distribution function: Weibull Distribution (k=0.75)\n")
        elif C == 2:
            print("\nBest fit distribution function: Weibull Distribution (k=1.00)\n")
        elif C == 3:
            print("\nBest fit distribution function: Weibull Distribution (k=1.40)\n")
        elif C == 4:
            print("\nBest fit distribution function: Weibull Distribution (k=2.00)\n")
        print("%i%% Confidence Interval, (Lower Bound - Upper Bound)\nReturn period" % conf)
        print("\tFT-I W (k=0.75) W (k=1.00) W (k=1.40) W (k=2.00)")
        for m in range(6):
            print("%-i\t%-3.1f - %-3.1f %-3.1f - %-3.1f %-3.1f - %-3.1f %-3.1f - %-3.1f %-3.1f - %-3.1f" %\
                (ret[indexList[m]], lowbound[indexList[m]][0], highbound[indexList[m]][0],\
                lowbound[indexList[m]][1], highbound[indexList[m]][1],
                lowbound[indexList[m]][2], highbound[indexList[m]][2],
                lowbound[indexList[m]][3], highbound[indexList[m]][3],
                lowbound[indexList[m]][4], highbound[indexList[m]][4]))
        print("\nPercent Chance for Significant Height Equaling or Exceeding Return Period Hs")
        for i in range(len(printpe)):
            printLine = ""
            for j in range(len(printpe[0])):
                if i == 0 and j == 0:
                    printLine += "      "
                elif i == 0:
                    printLine += "%6d" % printpe[i][j]
                else:
                    printLine += "%6d" % round(printpe[i][j])
            print(printLine)
        # end for loop
        dataDict["Sx"] = Sx
        dataDict["standev"] = standev
        dataDict["alpha"] = alpha
        dataDict["beta"] = beta
        dataDict["rxy"] = rxy
        dataDict["sumresid"] = sumresid
        dataDict["yact"] = yact
        dataDict["ym"] = ym
        dataDict["xxr"] = xxr
        dataDict["conf"] = conf
        dataDict["Hsr"] = Hsr
        dataDict["sigr"] = sigr
        dataDict["printside"] = printside
        dataDict["indexList"] = indexList
        self.fileOutputWriteMain(dataDict)
        # Saved for performPlot().
        self.plotDict = {"ret": ret, "Hsr": Hsr, "rtp": rtp,\
            "highbound": highbound, "lowbound": lowbound}
    # end performCalculations
    def fileOutputWriteData(self, dataDict):
        """Write the full analysis report to the output file and mirror the
        same values through self.exporter.

        dataDict is the dictionary assembled by performCalculations; when
        self.errorMsg is set only the header and the error are written.
        """
        distList = ["FISHER-TIPPETT TYPE (FT-I) DISTRIBUTION",\
            "WEIBULL DISTRIBUTION k = 0.75",\
            "WEIBULL DISTRIBUTION k = 1.00",\
            "WEIBULL DISTRIBUTION k = 1.40",\
            "WEIBULL DISTRIBUTION k = 2.00"]
        self.fileRef.write("EXTREMAL SIGNIFICANT WAVE HEIGHT ANALYSIS\n")
        self.fileRef.write("DELFT Data\n\n")
        self.fileRef.write("N = %d STORMS\n" % dataDict["N"])
        self.fileRef.write("NT = %d STORMS\n" % dataDict["Nt"])
        self.fileRef.write("NU = %-6.2f\n" % dataDict["nu"])
        self.fileRef.write("K = %-6.2f YEARS\n" % dataDict["K"])
        self.fileRef.write("LAMBDA = %-6.2f STORMS PER YEAR\n" % dataDict["lambdaVal"])
        if self.errorMsg != None:
            self.fileRef.write("\n%s" % self.errorMsg)
        else:
            self.fileRef.write("MEAN OF SAMPLE DATA = %-6.3f FEET\n" % (dataDict["Sx"]/dataDict["N"]))
            self.fileRef.write("STANDARD DEVIATION OF SAMPLE = %-6.3f FEET\n" % dataDict["standev"])
            # One fitted-distribution section per candidate distribution.
            for distIndex in range(len(distList)):
                self.fileRef.write("\n%s\n" % distList[distIndex])
                self.fileRef.write("F(Hs) = EXP(-EXP(-(Hs-B)/A)) - Equation 1\n")
                self.fileRef.write("A = %-6.3f %s\n" % (dataDict["alpha"][distIndex], self.labelUnitDist))
                self.fileRef.write("B = %-6.3f %s\n" % (dataDict["beta"][distIndex], self.labelUnitDist))
                self.fileRef.write("CORRELATION = %-6.4f\n" % dataDict["rxy"][distIndex])
                self.fileRef.write("SUM SQUARE OF RESIDUALS = %-6.4f %s\n" %\
                    (dataDict["sumresid"][distIndex], self.labelUnitDist))
                self.fileRef.write("\nRANK\tHsm\tF(Hs<=Hsm)\tYm\tA*Ym+B\t\tHsm-(A*Ym+B)\n")
                self.fileRef.write("\t(Ft)\tEq. 3\t\tEq. 5\tEq. 4 (%s)\t(%s)\n" %\
                    (self.labelUnitDist, self.labelUnitDist))
                # Ranked-observation table for this distribution.
                for loopIndex in range(len(self.Hs)):
                    self.fileRef.write("%d\t%-6.2f\t%-6.4f\t\t%-6.3f\t%-6.4f\t\t%-6.4f\n" %\
                        ((loopIndex + 1),\
                        self.Hs[loopIndex],\
                        dataDict["yact"][loopIndex][distIndex],\
                        dataDict["ym"][loopIndex][distIndex],\
                        dataDict["xxr"][loopIndex][distIndex],\
                        (self.Hs[loopIndex] - dataDict["xxr"][loopIndex][distIndex])))
                self.fileRef.write("\nRETURN PERIOD TABLE with %d%% CONFIDENCE INTERVAL\n" % dataDict["conf"])
                self.fileRef.write("\nRETURN\tHs\tSIGR\tHs-1.28*SIGR\tHs+1.28*SIGR\n")
                self.fileRef.write("PERIOD\t(%s)\t(%s)\t(%s)\t\t(%s)\n" %\
                    (self.labelUnitDist, self.labelUnitDist, self.labelUnitDist, self.labelUnitDist))
                self.fileRef.write("(Yr)\tEq. 6\tEq. 10\n")
                for loopIndex in range(len(dataDict["indexList"])):
                    self.fileRef.write("%-6.2f\t%-6.2f\t%-6.2f\t%-6.2f\t\t%-6.2f\n" %\
                        (dataDict["printside"][loopIndex],\
                        dataDict["Hsr"][dataDict["indexList"][loopIndex]][distIndex],\
                        dataDict["sigr"][dataDict["indexList"][loopIndex]][distIndex],\
                        dataDict["Hsr"][dataDict["indexList"][loopIndex]][distIndex] - 1.28*dataDict["sigr"][dataDict["indexList"][loopIndex]][distIndex],\
                        dataDict["Hsr"][dataDict["indexList"][loopIndex]][distIndex] + 1.28*dataDict["sigr"][dataDict["indexList"][loopIndex]][distIndex]))
        # end if
        # Build the flat export list mirroring the report above.
        exportData = [dataDict["N"], dataDict["Nt"], dataDict["nu"],\
            dataDict["K"], dataDict["lambdaVal"]]
        if self.errorMsg != None:
            exportData.append(self.errorMsg)
        else:
            exportData = exportData + [dataDict["Sx"]/dataDict["N"],\
                dataDict["standev"]]
            for distIndex in range(len(distList)):
                exportData = exportData + [dataDict["alpha"][distIndex],\
                    dataDict["beta"][distIndex], dataDict["rxy"][distIndex],\
                    dataDict["sumresid"][distIndex]]
                for loopIndex in range(len(self.Hs)):
                    exportData = exportData + [self.Hs[loopIndex],\
                        dataDict["yact"][loopIndex][distIndex],\
                        dataDict["ym"][loopIndex][distIndex],\
                        dataDict["xxr"][loopIndex][distIndex],\
                        (self.Hs[loopIndex] - dataDict["xxr"][loopIndex][distIndex])]
                for loopIndex in range(len(dataDict["indexList"])):
                    exportData = exportData + [dataDict["printside"][loopIndex],\
                        dataDict["Hsr"][dataDict["indexList"][loopIndex]][distIndex],\
                        dataDict["sigr"][dataDict["indexList"][loopIndex]][distIndex],\
                        dataDict["Hsr"][dataDict["indexList"][loopIndex]][distIndex] -\
                        1.28*dataDict["sigr"][dataDict["indexList"][loopIndex]][distIndex],\
                        dataDict["Hsr"][dataDict["indexList"][loopIndex]][distIndex] +\
                        1.28*dataDict["sigr"][dataDict["indexList"][loopIndex]][distIndex]]
        # end if
        self.exporter.writeData(exportData)
    # end fileOutputWrite
    def hasPlot(self):
        """This driver produces plots (see performPlot)."""
        return True
def performPlot(self):
for i in range(5):
plt.figure((i + 1), figsize = self.plotConfigDict["figSize"],\
dpi = self.plotConfigDict["dpi"])
plotDataHsr = [j[i] for j in self.plotDict["Hsr"]]
plotDataRtp = [j[i] for j in self.plotDict["rtp"]]
plotDataHighbound = [j[i] for j in self.plotDict["highbound"]]
plotDataLowbound = [j[i] for j in self.plotDict["lowbound"]]
plt.semilogx(\
self.plotDict["ret"], plotDataHsr, ":",\
plotDataRtp, self.Hs,\
self.plotDict["ret"], plotDataHighbound, "r--",\
self.plotDict["ret"], plotDataLowbound, "r--")
if i == 0:
plotTitle = "FT-I"
plotLegend = "FT-I Distribution"
elif i == 1:
plotTitle = "Weibull (k=0.75)"
plotLegend = "Weibull (k=0.75)"
elif i == 2:
plotTitle = "Weibull (k=1.00)"
plotLegend = "Weibull (k=1.00)"
elif i == 3:
plotTitle = "Weibull (k=1.40)"
plotLegend = "Weibull (k=1.40)"
elif i == 4:
plotTitle = "Weibull (k=2.00)"
plotLegend = "Weibull (k=2.00)"
plt.title(plotTitle,\
fontsize = self.plotConfigDict["titleFontSize"])
plt.xlabel("Return period [yr]",\
fontsize = self.plotConfigDict["axisLabelFontSize"])
plt.ylabel(r"H$_s$",\
fontsize = self.plotConfigDict["axisLabelFontSize"])
plt.legend([plotLegend, "Data", "Confidence Bounds",\
"Location", "SouthEast"])
# end for loop
plt.show()
# end performPlot
    # Override to prevent creating additional output file
    def fileOutputPlotInit(self):
        """Intentionally a no-op: no plot output file is created."""
        pass
driver = ExtHsAnalysis() | gpl-3.0 |
hugobowne/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 22 | 25505 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distance helper function.
    # Checks metric-name strings against their callable/scipy equivalents,
    # dense vs. sparse inputs, tuple inputs, kwargs forwarding, and error
    # behavior for unsupported combinations.
    rng = np.random.RandomState(0)
    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((5, 4))
    S = pairwise_distances(X, metric="euclidean")
    S2 = euclidean_distances(X)
    assert_array_almost_equal(S, S2)
    # Euclidean distance, with Y != X.
    Y = rng.random_sample((2, 4))
    S = pairwise_distances(X, Y, metric="euclidean")
    S2 = euclidean_distances(X, Y)
    assert_array_almost_equal(S, S2)
    # Test with tuples as X and Y
    X_tuples = tuple([tuple([v for v in row]) for row in X])
    Y_tuples = tuple([tuple([v for v in row]) for row in Y])
    S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
    assert_array_almost_equal(S, S2)
    # "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
    S = pairwise_distances(X, metric="cityblock")
    S2 = pairwise_distances(X, metric=cityblock)
    assert_equal(S.shape[0], S.shape[1])
    assert_equal(S.shape[0], X.shape[0])
    assert_array_almost_equal(S, S2)
    # The manhattan metric should be equivalent to cityblock.
    S = pairwise_distances(X, Y, metric="manhattan")
    S2 = pairwise_distances(X, Y, metric=cityblock)
    assert_equal(S.shape[0], X.shape[0])
    assert_equal(S.shape[1], Y.shape[0])
    assert_array_almost_equal(S, S2)
    # Low-level function for manhattan can divide in blocks to avoid
    # using too much memory during the broadcasting
    S3 = manhattan_distances(X, Y, size_threshold=10)
    assert_array_almost_equal(S, S3)
    # Test cosine as a string metric versus cosine callable
    # "cosine" uses sklearn metric, cosine (function) is scipy.spatial
    S = pairwise_distances(X, Y, metric="cosine")
    S2 = pairwise_distances(X, Y, metric=cosine)
    assert_equal(S.shape[0], X.shape[0])
    assert_equal(S.shape[1], Y.shape[0])
    assert_array_almost_equal(S, S2)
    # Test with sparse X and Y,
    # currently only supported for Euclidean, L1 and cosine.
    X_sparse = csr_matrix(X)
    Y_sparse = csr_matrix(Y)
    S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
    S2 = euclidean_distances(X_sparse, Y_sparse)
    assert_array_almost_equal(S, S2)
    S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
    S2 = cosine_distances(X_sparse, Y_sparse)
    assert_array_almost_equal(S, S2)
    # Mixed sparse formats (csc/bsr/coo) should also be accepted.
    S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
    S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
    assert_array_almost_equal(S, S2)
    S2 = manhattan_distances(X, Y)
    assert_array_almost_equal(S, S2)
    # Test with scipy.spatial.distance metric, with a kwd
    kwds = {"p": 2.0}
    S = pairwise_distances(X, Y, metric="minkowski", **kwds)
    S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
    assert_array_almost_equal(S, S2)
    # same with Y = None
    kwds = {"p": 2.0}
    S = pairwise_distances(X, metric="minkowski", **kwds)
    S2 = pairwise_distances(X, metric=minkowski, **kwds)
    assert_array_almost_equal(S, S2)
    # Test that scipy distance metrics throw an error if sparse matrix given
    assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
    assert_raises(TypeError, pairwise_distances, X, Y_sparse,
                  metric="minkowski")
    # Test that a value error is raised if the metric is unknown
    assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
    """Precomputed metric: shape validation, no-copy passthrough, float output."""
    for func in [pairwise_distances, pairwise_kernels]:
        # Test correct shape
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), metric='precomputed')
        # with two args
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), np.zeros((4, 4)),
                             metric='precomputed')
        # even if shape[1] agrees (although thus second arg is spurious)
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), np.zeros((4, 3)),
                             metric='precomputed')
        # Test not copied (if appropriate dtype)
        S = np.zeros((5, 5))
        S2 = func(S, metric="precomputed")
        assert_true(S is S2)
        # with two args
        S = np.zeros((5, 3))
        S2 = func(S, np.zeros((3, 3)), metric="precomputed")
        assert_true(S is S2)
        # Test always returns float dtype
        S = func(np.array([[1]], dtype='int'), metric='precomputed')
        assert_equal('f', S.dtype.kind)
        # Test converts list to array-like
        S = func([[1]], metric='precomputed')
        assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
    """Check n_jobs=2 gives the same result as n_jobs=1 (dense and sparse)."""
    rng = np.random.RandomState(0)
    for make_data in (np.array, csr_matrix):
        X = make_data(rng.random_sample((5, 4)))
        Y = make_data(rng.random_sample((3, 4)))
        try:
            S = func(X, metric=metric, n_jobs=1, **kwds)
        except (TypeError, ValueError) as exc:
            # Not all metrics support sparse input
            # ValueError may be triggered by bad callable
            if make_data is csr_matrix:
                # the parallel path must fail the same way as the serial one
                assert_raises(type(exc), func, X, metric=metric,
                              n_jobs=2, **kwds)
                continue
            else:
                raise
        S2 = func(X, metric=metric, n_jobs=2, **kwds)
        assert_array_almost_equal(S, S2)
        S = func(X, Y, metric=metric, n_jobs=1, **kwds)
        S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
        assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
    """Nose-style generator test: parallel consistency for several metrics."""
    wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
    metrics = [(pairwise_distances, 'euclidean', {}),
               (pairwise_distances, wminkowski, wminkowski_kwds),
               (pairwise_distances, 'wminkowski', wminkowski_kwds),
               (pairwise_kernels, 'polynomial', {'degree': 1}),
               (pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
               ]
    for func, metric, kwds in metrics:
        yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    """A callable metric need not satisfy metric(x, x) == 0."""
    # paired_distances should allow callable metric where metric(x, x) != 0
    # Knowing that the callable is a strict metric would allow the diagonal to
    # be left uncalculated and set to 0.
    assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
    """Callable version of pairwise.rbf_kernel, usable as a custom metric.

    Promotes ``x`` and ``y`` to 2-D before delegating, so single samples
    (1-D rows) are accepted as well.
    """
    return rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
    """pairwise_kernels agrees with the underlying kernel functions."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((2, 4))
    # Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
    test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
                    "chi2", "additive_chi2"]
    for metric in test_metrics:
        function = PAIRWISE_KERNEL_FUNCTIONS[metric]
        # Test with Y=None
        K1 = pairwise_kernels(X, metric=metric)
        K2 = function(X)
        assert_array_almost_equal(K1, K2)
        # Test with Y=Y
        K1 = pairwise_kernels(X, Y=Y, metric=metric)
        K2 = function(X, Y=Y)
        assert_array_almost_equal(K1, K2)
        # Test with tuples as X and Y
        X_tuples = tuple([tuple([v for v in row]) for row in X])
        Y_tuples = tuple([tuple([v for v in row]) for row in Y])
        K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
        assert_array_almost_equal(K1, K2)
        # Test with sparse X and Y
        X_sparse = csr_matrix(X)
        Y_sparse = csr_matrix(Y)
        if metric in ["chi2", "additive_chi2"]:
            # these don't support sparse matrices yet
            assert_raises(ValueError, pairwise_kernels,
                          X_sparse, Y=Y_sparse, metric=metric)
            continue
        K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
        assert_array_almost_equal(K1, K2)
    # Test with a callable function, with given keywords.
    metric = callable_rbf_kernel
    kwds = {'gamma': 0.1}
    K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
    K2 = rbf_kernel(X, Y=Y, **kwds)
    assert_array_almost_equal(K1, K2)
    # callable function, X=Y
    K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
    K2 = rbf_kernel(X, Y=X, **kwds)
    assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
    """filter_params=True drops unknown kwargs; without it they raise."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((2, 4))
    K = rbf_kernel(X, Y, gamma=0.1)
    params = {"gamma": 0.1, "blabla": ":)"}
    K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
    assert_array_almost_equal(K, K2)
    assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    """paired_distances matches the per-metric functions and pairwise diagonals."""
    # Test the pairwise_distance helper function.
    rng = np.random.RandomState(0)
    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((5, 4))
    # Euclidean distance, with Y != X.
    Y = rng.random_sample((5, 4))
    for metric, func in iteritems(PAIRED_DISTANCES):
        S = paired_distances(X, Y, metric=metric)
        S2 = func(X, Y)
        assert_array_almost_equal(S, S2)
        S3 = func(csr_matrix(X), csr_matrix(Y))
        assert_array_almost_equal(S, S3)
        if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check the pairwise_distances implementation
            # gives the same value
            distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
            distances = np.diag(distances)
            assert_array_almost_equal(distances, S)
    # Check the callable implementation
    S = paired_distances(X, Y, metric='manhattan')
    S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
    assert_array_almost_equal(S, S2)
    # Test that a value error is raised when the lengths of X and Y should not
    # differ
    Y = rng.random_sample((3, 4))
    assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
    """argmin / argmin_min agree across dense, sparse, sklearn and scipy metrics."""
    # Check pairwise minimum distances computation for any metric
    X = [[0], [1]]
    Y = [[-1], [2]]
    Xsp = dok_matrix(X)
    Ysp = csr_matrix(Y, dtype=np.float32)
    # euclidean metric
    D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
    D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(D2, [0, 1])
    assert_array_almost_equal(D, [0, 1])  # NOTE: duplicate of the check two lines up
    assert_array_almost_equal(E, [1., 1.])
    # sparse matrix case
    Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
    assert_array_equal(Dsp, D)
    assert_array_equal(Esp, E)
    # We don't want np.matrix here
    assert_equal(type(Dsp), np.ndarray)
    assert_equal(type(Esp), np.ndarray)
    # Non-euclidean sklearn metric
    D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
    D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(D2, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
    D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Non-euclidean Scipy distance (callable)
    D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
                                         metric_kwargs={"p": 2})
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Non-euclidean Scipy distance (string)
    D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
                                         metric_kwargs={"p": 2})
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Compare with naive implementation
    rng = np.random.RandomState(0)
    X = rng.randn(97, 149)
    Y = rng.randn(111, 149)
    dist = pairwise_distances(X, Y, metric="manhattan")
    dist_orig_ind = dist.argmin(axis=0)
    dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
    dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
        X, Y, axis=0, metric="manhattan", batch_size=50)
    np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
    np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
    """euclidean_distances: dense, sparse, and precomputed-norm code paths."""
    # Check the pairwise Euclidean distances computation
    X = [[0]]
    Y = [[1], [2]]
    D = euclidean_distances(X, Y)
    assert_array_almost_equal(D, [[1., 2.]])
    X = csr_matrix(X)
    Y = csr_matrix(Y)
    D = euclidean_distances(X, Y)
    assert_array_almost_equal(D, [[1., 2.]])
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((20, 4))
    X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
    Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
    # check that we still get the right answers with {X,Y}_norm_squared
    D1 = euclidean_distances(X, Y)
    D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
    D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
    D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
                             Y_norm_squared=Y_norm_sq)
    assert_array_almost_equal(D2, D1)
    assert_array_almost_equal(D3, D1)
    assert_array_almost_equal(D4, D1)
    # check we get the wrong answer with wrong {X,Y}_norm_squared
    X_norm_sq *= 0.5
    Y_norm_sq *= 0.5
    wrong_D = euclidean_distances(X, Y,
                                  X_norm_squared=np.zeros_like(X_norm_sq),
                                  Y_norm_squared=np.zeros_like(Y_norm_sq))
    assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
    """Row-wise (paired) Euclidean distances on a tiny fixture."""
    # Check the paired Euclidean distances computation
    X = [[0], [0]]
    Y = [[1], [2]]
    D = paired_euclidean_distances(X, Y)
    assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
    """Row-wise (paired) Manhattan distances on a tiny fixture."""
    # Check the paired manhattan distances computation
    X = [[0], [0]]
    Y = [[1], [2]]
    D = paired_manhattan_distances(X, Y)
    assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
    """chi2 / additive_chi2 kernels: values, dtypes, and input validation."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((10, 4))
    K_add = additive_chi2_kernel(X, Y)
    gamma = 0.1
    K = chi2_kernel(X, Y, gamma=gamma)
    assert_equal(K.dtype, np.float)
    for i, x in enumerate(X):
        for j, y in enumerate(Y):
            chi2 = -np.sum((x - y) ** 2 / (x + y))
            chi2_exp = np.exp(gamma * chi2)
            assert_almost_equal(K_add[i, j], chi2)
            assert_almost_equal(K[i, j], chi2_exp)
    # check diagonal is ones for data with itself
    K = chi2_kernel(Y)
    assert_array_equal(np.diag(K), 1)
    # check off-diagonal is < 1 but > 0:
    assert_true(np.all(K > 0))
    assert_true(np.all(K - np.diag(np.diag(K)) < 1))
    # check that float32 is preserved
    X = rng.random_sample((5, 4)).astype(np.float32)
    Y = rng.random_sample((10, 4)).astype(np.float32)
    K = chi2_kernel(X, Y)
    assert_equal(K.dtype, np.float32)
    # check integer type gets converted,
    # check that zeros are handled
    X = rng.random_sample((10, 4)).astype(np.int32)
    K = chi2_kernel(X, X)
    assert_true(np.isfinite(K).all())
    assert_equal(K.dtype, np.float)
    # check that kernel of similar things is greater than dissimilar ones
    X = [[.3, .7], [1., 0]]
    Y = [[0, 1], [.9, .1]]
    K = chi2_kernel(X, Y)
    assert_greater(K[0, 0], K[0, 1])
    assert_greater(K[1, 1], K[1, 0])
    # test negative input
    assert_raises(ValueError, chi2_kernel, [[0, -1]])
    assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
    assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
    # different n_features in X and Y
    assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
    # sparse matrices
    assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
    assert_raises(ValueError, additive_chi2_kernel,
                  csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
    """Every kernel K(X, X) must be a symmetric matrix."""
    # Valid kernels should be symmetric
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
                   laplacian_kernel, sigmoid_kernel, cosine_similarity):
        K = kernel(X, X)
        assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
    """Kernels give identical results on dense and CSR-sparse input."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    X_sparse = csr_matrix(X)
    for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
                   laplacian_kernel, sigmoid_kernel, cosine_similarity):
        K = kernel(X, X)
        K2 = kernel(X_sparse, X_sparse)
        assert_array_almost_equal(K, K2)
def test_linear_kernel():
    """Diagonal of the linear kernel equals the squared row norms."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    K = linear_kernel(X, X)
    # the diagonal elements of a linear kernel are their squared norm
    assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
    """Diagonal of the RBF kernel is identically one."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    K = rbf_kernel(X, X)
    # the diagonal elements of a rbf kernel are 1
    assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
    """Laplacian kernel: unit diagonal, off-diagonal strictly in (0, 1)."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    K = laplacian_kernel(X, X)
    # the diagonal elements of a laplacian kernel are 1
    assert_array_almost_equal(np.diag(K), np.ones(5))
    # off-diagonal elements are < 1 but > 0:
    assert_true(np.all(K > 0))
    assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
    """dense_output=False on sparse input yields a sparse similarity matrix."""
    # Test if cosine_similarity correctly produces sparse output.
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((3, 4))
    Xcsr = csr_matrix(X)
    Ycsr = csr_matrix(Y)
    K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
    assert_true(issparse(K1))
    K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
    assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
    """Cosine kernel equals the linear kernel on L2-normalized data."""
    # Test the cosine_similarity.
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((3, 4))
    Xcsr = csr_matrix(X)
    Ycsr = csr_matrix(Y)
    for X_, Y_ in ((X, None), (X, Y),
                   (Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
        # has been previously normalized by L2-norm.
        K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
        X_ = normalize(X_)
        if Y_ is not None:
            Y_ = normalize(Y_)
        K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
        assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
    """check_pairwise_arrays(XA, None) returns XA for both outputs."""
    # Ensure that pairwise array check works for dense matrices.
    # Check that if XB is None, XB is returned as reference to XA
    XA = np.resize(np.arange(40), (5, 8))
    XA_checked, XB_checked = check_pairwise_arrays(XA, None)
    assert_true(XA_checked is XB_checked)
    assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
    """Valid XA/XB pairs pass through the checks unchanged."""
    # Ensure that if XA and XB are given correctly, they return as equal.
    # Check that if XB is not None, it is returned equal.
    # Note that the second dimension of XB is the same as XA.
    XA = np.resize(np.arange(40), (5, 8))
    XB = np.resize(np.arange(32), (4, 8))
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
    assert_array_equal(XA, XA_checked)
    assert_array_equal(XB, XB_checked)
    XB = np.resize(np.arange(40), (5, 8))
    XA_checked, XB_checked = check_paired_arrays(XA, XB)
    assert_array_equal(XA, XA_checked)
    assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
    """Mismatched shapes must raise ValueError in both check functions."""
    # Ensure an error is raised if the dimensions are different.
    XA = np.resize(np.arange(45), (5, 9))
    XB = np.resize(np.arange(32), (4, 8))
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)
    XB = np.resize(np.arange(4 * 9), (4, 9))
    assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
    """Arrays with incompatible feature dimensions must raise ValueError."""
    # Ensure an error is raised on 1D input arrays.
    # The modified tests are not 1D. In the old test, the array was internally
    # converted to 2D anyways
    XA = np.arange(45).reshape(9, 5)
    XB = np.arange(32).reshape(4, 8)
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)
    # (a second, byte-identical copy of the three lines above was removed as
    # redundant copy-paste duplication; it exercised nothing new)
def test_check_sparse_arrays():
    """check_pairwise_arrays preserves sparsity and content of CSR inputs."""
    # Ensures that checks return valid sparse matrices.
    rng = np.random.RandomState(0)
    XA = rng.random_sample((5, 4))
    XA_sparse = csr_matrix(XA)
    XB = rng.random_sample((5, 4))
    XB_sparse = csr_matrix(XB)
    XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
    # compare their difference because testing csr matrices for
    # equality with '==' does not work as expected.
    assert_true(issparse(XA_checked))
    assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
    assert_true(issparse(XB_checked))
    assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
    XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
    assert_true(issparse(XA_checked))
    assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
    assert_true(issparse(XA_2_checked))
    assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
    """Recursively convert an n-dimensional numpy array into nested tuples.

    A 1-D array becomes a flat tuple of its elements; higher-rank arrays
    become tuples of tuplified sub-arrays.
    """
    if len(X.shape) <= 1:
        # Base case: one dimension left, emit a flat tuple.
        return tuple(X)
    # Recurse over the leading axis.
    return tuple(tuplify(sub) for sub in X)
def test_check_tuple_input():
    """Nested tuples are accepted and converted by check_pairwise_arrays."""
    # Ensures that checks return valid tuples.
    rng = np.random.RandomState(0)
    XA = rng.random_sample((5, 4))
    XA_tuples = tuplify(XA)
    XB = rng.random_sample((5, 4))
    XB_tuples = tuplify(XB)
    XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
    assert_array_equal(XA_tuples, XA_checked)
    assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
    """float32 inputs stay float32; mixed dtypes are upcast to float64."""
    # Ensures that type float32 is preserved.
    XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XA_checked, XB_checked = check_pairwise_arrays(XA, None)
    assert_equal(XA_checked.dtype, np.float32)
    # both float32
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
    assert_equal(XA_checked.dtype, np.float32)
    assert_equal(XB_checked.dtype, np.float32)
    # mismatched A
    XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
                                                   XB)
    assert_equal(XA_checked.dtype, np.float)
    assert_equal(XB_checked.dtype, np.float)
    # mismatched B
    XA_checked, XB_checked = check_pairwise_arrays(XA,
                                                   XB.astype(np.float))
    assert_equal(XA_checked.dtype, np.float)
    assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
HFO-detect/HFO-detect-python | pyhfo_detect/core/cs_detector.py | 1 | 18781 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 20 14:27:15 2017
Ing.,Mgr. (MSc.) Jan CimbΓ‘lnΓk, PhD.
Biomedical engineering
International Clinical Research Center
St. Anne's University Hospital in Brno
Czech Republic
&
Mayo systems electrophysiology lab
Mayo Clinic
200 1st St SW
Rochester, MN
United States
"""
# Std imports
from multiprocessing import Pool
# Third pary imports
import numpy as np
import pandas as pd
from scipy.signal import butter, filtfilt, hilbert
from scipy.special import gammaincinv
# Local imports
from ..io.data_operations import create_output_df
# %% CS detector
#def cs_detect(data, fs, low_fc, high_fc,
# threshold, band_detections = True,
# stat_window_size = 10, cycs_per_detect = 4):
# """
# CS detection algorithm.
#
# CIMBΓLNΓK, Jan, Angela HEWITT, Greg WORRELL and Matt STEAD. \n
# The CS Algorithm: A Novel Method for High Frequency Oscillation \n
# Detection in EEG. Journal of Neuroscience Methods [online]. \n
# 2017, vol. 293, pp. 6β16. ISSN 01650270.\n
# Available at: doi:10.1016/j.jneumeth.2017.08.023
#
#
#
# Parameters:
# -----------
# data(1-d numpy array) - raw data\n
# fs(int) - sampling frequency\n
# low_fc(float) - low cut-off frequency\n
# high_fc(float) - high cut-off frequency\n
# stat_window_size(float) - statistical window size in secs (default = 10)\n
# det_window_size(float) - number of cycles in secs (default = 5)\n
#
# Returns:
# --------
# df_out(pandas.DataFrame) - output dataframe with detections\n
# """
#
# # Create output dataframe
#
# df_out = create_output_df()
#
# return
def cs_detect_beta(data, fs, low_fc, high_fc,
                   threshold, band_detections = True,
                   stat_window_size = 10, cycs_per_detect = 4,
                   mp = 1):
    """
    Beta version of CS detection algorithm. Which was used to develop \n
    CS detection algorithm.
    CIMBΓLNΓK, Jan, Angela HEWITT, Greg WORRELL and Matt STEAD. \n
    The CS Algorithm: A Novel Method for High Frequency Oscillation \n
    Detection in EEG. Journal of Neuroscience Methods [online]. \n
    2017, vol. 293, pp. 6β16. ISSN 01650270.\n
    Available at: doi:10.1016/j.jneumeth.2017.08.023
    Parameters:
    -----------
    data(1-d numpy array) - raw data\n
    fs(int) - sampling frequency\n
    low_fc(float) - low cut-off frequency (currently unused - detection bands
        come from the fixed internal table; kept for API compatibility)\n
    high_fc(float) - high cut-off frequency (currently unused, see above)\n
    threshold(float) - percentile (0-1) of the per-band gamma CDFs used as
        the detection threshold\n
    band_detections(bool) - output includes bandwise detections (default=True)\n
    stat_window_size(float) - statistical window size in secs (default = 10)\n
    cycs_per_detect(int) - number of band-center cycles per detection
        window (default = 4)\n
    mp(int) - number of cores to use (def = 1)\n
    Returns:
    --------
    df_out(pandas.DataFrame) - output dataframe with detections\n
    """
    # Create output dataframe
    df_out = create_output_df(fields=['low_fc','high_fc',
                                      'amp','fhom','dur','prod',
                                      'type'])
    # TODO - move the settings to a data file
    # Per-band filter edges and pre-fitted gamma-distribution parameters
    # (k, theta, offset) for amplitude, fhom, product and duration features.
    constants = {'BAND_STARTS':[44,52,62,73,86,102,121,143,169,199,237,280,332,392,464,549,650],
                 'BAND_CENTERS':[52,62,73,86,102,121,143,169,199,237,280,332,392,464,549,650,769],
                 'BAND_STOPS':[62,73,86,102,121,143,169,199,237,280,332,392,464,549,650,769,909],
                 'AMP_KS':[1.13970939, 0.90183703, 1.26436011, 1.03769074, 0.85849874, 0.94987266, 0.80845992, 1.67940963, 1.04080418, 1.24382275, 1.60240884, 1.10695014, 1.17010383, 0.88196648, 1.04245538, 0.70917389, 2.21536184],
                 'AMP_THETAS':[1.65277574, 3.48530721, 2.98961385, 11.54210813, 18.93869204, 10.11982852, 10.53609476, 5.91562993, 11.09205920, 8.84505258, 6.92641365, 18.89938640, 23.76501855, 30.42839963, 27.30653900, 22.48544327, 0.08329301],
                 'AMP_OFFSETS':[6.41469207, 6.39345582, 6.40000914, 7.32380252, 8.32055181, 8.58559154, 8.27742490, 9.97358643, 10.49550234, 12.41888242, 15.86698463, 21.34769474, 21.89082728, 17.18456284, 18.93825748, 16.30660646, 7.69330283],
                 'FHOM_KS':[1.66197234, 1.00540463, 1.79692941, 1.15586041, 1.02455216, 1.21727010, 1.12610054, 0.70076969, 0.98379084, 1.54577304, 1.51861533, 1.23976157, 1.43199934, 1.17238163, 0.58636256, 1.12205645, 0.09508500],
                 'FHOM_THETAS':[4.71109440, 6.05698300, 3.84238418, 6.23370380, 7.89603172, 7.87712768, 8.45272550, 10.00101086, 6.58376596, 3.53488296, 5.27183305, 6.36805821, 7.56839088, 8.24757240, 14.90634368, 18.85016717, 260.59793175],
                 'FHOM_OFFSETS':[8.16878678, 10.55275451, 8.07166998, 8.07086829, 8.94105317, 7.75703706, 7.89853517, 7.14019430, 8.17322770, 8.55596745, 6.90226263, 7.17550663, 7.77665423, 9.07663424, 14.82474643, 20.20094041, 17.71110000],
                 'PROD_KS':[0.84905609, 1.01954096, 1.58872304, 1.88690171, 1.27908635, 1.06280570, 0.92824868, 1.49057163, 1.38457279, 2.14489528, 1.35910370, 1.44452982, 1.89318549, 0.92291990, 0.97845756, 1.42279817, 0.09633877],
                 'PROD_THETAS':[5.84241875, 2.72996718, 3.68246691, 6.69128325, 10.43308700, 11.90997028, 13.04316866, 6.93301203, 8.31241387, 4.62399907, 7.32859575, 11.79756235, 12.32143937, 26.04107818, 17.76146131, 18.81871472, 195.40205368],
                 'PROD_OFFSETS':[16.32704840, 19.47650057, 16.18710622, 16.34553372, 19.25022797, 18.30852676, 18.15222002, 18.98117587, 19.84269749, 21.64225522, 24.19732683, 25.65335524, 26.52948797, 24.05945634, 38.10559556, 34.94781992, 20.41020467],
                 'DUR_KS':[0.94831016, 1.20644724, 1.19723676, 1.24834990, 1.72876216, 1.88991915, 1.45709687, 1.76097598, 1.42626762, 1.81104799, 2.09379726, 2.28979796, 1.92883462, 2.15155894, 1.14187099, 1.42071107, 0.38495461],
                 'DUR_THETAS':[0.04543605, 0.04113687, 0.03842913, 0.03390445, 0.02099894, 0.01687568, 0.01622539, 0.00794505, 0.00857187, 0.00499798, 0.00489236, 0.00462047, 0.00532479, 0.00263985, 0.00623849, 0.01249162, 0.00115305],
                 'DUR_OFFSETS':[0.10320000, 0.09316255, 0.06500000, 0.05480000, 0.04420000, 0.03220000, 0.02820000, 0.02580000, 0.02291436, 0.01940000, 0.01760000, 0.01500000, 0.01180000, 0.01000000, 0.01180000, 0.01500000, 0.00844698]}
    nyquist = (fs / 2) - 1
    # Only analyze bands that fit under the Nyquist frequency
    n_bands = len([x for x in constants['BAND_STOPS'] if x <= nyquist])
    edge_thresh = 0.1
    df_i = 0
    stat_win_samp = int(fs * stat_window_size)
    # 33rd / 66th percentile sample indices used for robust normalization
    norm_coefs = [round(stat_win_samp / 3), round((2 * stat_win_samp) / 3)]
    start_samp = 0
    stop_samp = start_samp + stat_win_samp
    conglom_arr = np.zeros([n_bands,stat_win_samp],'bool')
    #Create a pool of workers
    if mp > 1: work_pool = Pool(mp)
    # Process the signal one statistical window at a time
    while stop_samp <= len(data):
        x = data[start_samp:stop_samp]
        if mp > 1:
            iter_args = []
            for band_idx in range(n_bands):
                iter_args.append([x, fs, norm_coefs, band_idx,
                                  cycs_per_detect, threshold,
                                  edge_thresh, constants])
            band_concat = work_pool.map(detect_band,iter_args)
            # NOTE: a bare "work_pool.join" statement was removed here - it
            # was a no-op (missing call parentheses), and Pool.join() may
            # only be called after close(); map() already blocks until done.
            new_dets_df = pd.concat([res[0] for res in band_concat])
            df_out = pd.concat([df_out,new_dets_df])
            event_cnt = len(new_dets_df)
            for band_idx in range(n_bands):
                conglom_arr[band_idx,:] = band_concat[band_idx][1]
        else:
            event_cnt = 0
            for band_idx in range(n_bands):
                args = [x, fs, norm_coefs, band_idx, cycs_per_detect, threshold,
                        edge_thresh, constants]
                res = detect_band(args)
                conglom_arr[band_idx,:] = res[1]
                event_cnt += len(res[0])
                df_out = pd.concat([df_out,res[0]])
        df_out.reset_index(drop=True, inplace=True)
        # Create congloms - merge overlapping band detections into one event
        conglom_1d = np.sum(conglom_arr,0)
        new_det_idx = len(df_out)
        if any(conglom_1d):
            det_locs = np.where(conglom_1d)[0]
            starts_det_locs = np.where(np.diff(det_locs)>1)[0]+1
            stops_det_locs = np.where(np.diff(det_locs)>1)[0]
            if len(starts_det_locs):
                det_starts = np.concatenate([[det_locs[0]],
                                             det_locs[starts_det_locs]])
                det_stops = np.concatenate([det_locs[stops_det_locs],
                                            [det_locs[-1]]])
            else:
                det_starts = np.array([det_locs[0]])
                det_stops = np.array([det_locs[-1]])
            det_stops += 1
            # Index of the first detection added in this window
            new_det_idx -= event_cnt
            sub_df = df_out.loc[new_det_idx:]
            # Insert congloms
            df_i = len(df_out)
            for event_start,event_stop in zip(det_starts,det_stops):
                det_df = sub_df.loc[(sub_df.event_start >= event_start)
                                    & (sub_df.event_stop <= event_stop)]
                low_fc = det_df.loc[:,'low_fc'].min()
                high_fc = det_df.loc[:,'high_fc'].max()
                amp = det_df.loc[:,'amp'].max()
                fhom = det_df.loc[:,'fhom'].max()
                # BUG FIX: the conglom product was read from the 'amp'
                # column; it now reads the 'prod' column like its siblings.
                prod = det_df.loc[:,'prod'].max()
                dur = float(event_stop - event_start) / fs
                df_out.loc[df_i] = [event_start, event_stop,
                                    low_fc, high_fc,
                                    amp, fhom, dur, prod,
                                    'conglom']
                df_i += 1
        # Reset conglom array
        conglom_arr[:,:] = 0
        # Adjust starts / stops - shift window-relative samples to absolute
        if len(df_out):
            df_out.loc[new_det_idx:,'event_start'] += start_samp
            df_out.loc[new_det_idx:,'event_stop'] += start_samp
        start_samp += stat_win_samp
        stop_samp += stat_win_samp
    if mp > 1: work_pool.close()
    if not band_detections:
        df_out = df_out[~(df_out.type == 'band')]
        df_out.reset_index(drop=True,inplace=True)
    return df_out
# =============================================================================
# Subfunctions
# =============================================================================
def detect_band(args):
    """
    Runs detection in one frequency band.

    Parameters (packed in a single list so the function is usable with
    multiprocessing.Pool.map):
    args[0] - x: raw signal for one statistical window (1-d numpy array)
    args[1] - fs: sampling frequency
    args[2] - norm_coefs: [33rd, 66th] percentile sample indices for
              robust normalization
    args[3] - band_idx: index into the constants band tables
    args[4] - cycs_per_detect: cycles of the band center per window
    args[5] - threshold: gamma-CDF percentile for feature thresholds
    args[6] - edge_thresh: product level defining event start/stop edges
    args[7] - constants: band edges and gamma parameters (see cs_detect_beta)

    Returns:
    --------
    [df_out, conglom_band] - detections DataFrame for this band and a
    boolean mask of the samples covered by accepted detections.
    """
    x = args[0]
    fs = args[1]
    norm_coefs = args[2]
    band_idx = args[3]
    cycs_per_detect = args[4]
    threshold = args[5]
    edge_thresh = args[6]
    constants = args[7]
    conglom_band = np.zeros(len(x),'bool')
    df_out = create_output_df(fields=['low_fc','high_fc',
                                      'amp','fhom','dur','prod',
                                      'type'])
    df_i = 0
    # Detection window length: N cycles of the band center frequency
    wind_secs = cycs_per_detect / constants['BAND_CENTERS'][band_idx]
    # Broad-band filter (quarter of band center up to band stop)
    b,a = butter(3,[(constants['BAND_CENTERS'][band_idx] / 4)/(fs/2),constants['BAND_STOPS'][band_idx]/(fs/2)],'bandpass')
    bp_x = filtfilt(b,a,x)
    # Narrow-band filter (band start to band stop)
    b,a = butter(3,[constants['BAND_STARTS'][band_idx]/(fs/2),constants['BAND_STOPS'][band_idx]/(fs/2)],'bandpass')
    np_x = filtfilt(b,a,x)
    # Analytic amplitude of the narrow band and unit-amplitude phase signals
    h = hilbert(np_x)
    x_amps = np.abs(h)
    np_x_f = np.cos(np.angle(h))
    h = hilbert(bp_x)
    bp_x_f = np.cos(np.angle(h))
    # Frequency homogeneity: narrow vs. broad band sliding SNR-like measure
    x_fhoms = sliding_snr(np_x_f,bp_x_f,fs,wind_secs)
    # Normalisation - robust center/scale from the 33rd/66th percentiles
    # p1 = round(stat_win_samp / 3)
    # p2 = round((2 * stat_win_samp) / 3)
    sort_arr = np.sort(x_amps)
    amp_dev = (sort_arr[norm_coefs[1]] - sort_arr[norm_coefs[0]]) / 2
    x_amps = (x_amps - sort_arr[norm_coefs[1]]) / amp_dev
    sort_arr = np.sort(x_fhoms)
    fhom_dev = (sort_arr[norm_coefs[1]] - sort_arr[norm_coefs[0]]) / 2
    x_fhoms = (x_fhoms - sort_arr[norm_coefs[1]]) / fhom_dev
    x_prods = x_amps * x_fhoms
    # Sign-preserving square root of the product feature: two negative
    # factors must not produce a positive product.
    for i in range(len(x_prods)):
        if (x_fhoms[i] < 0) and (x_amps[i] < 0):
            x_prods[i] = -x_prods[i]
        if x_prods[i] < 0:
            x_prods[i] = -np.sqrt(-x_prods[i])
        else:
            x_prods[i] = np.sqrt(x_prods[i])
    sort_arr = np.sort(x_prods)
    prod_dev = (sort_arr[norm_coefs[1]] - sort_arr[norm_coefs[0]]) / 2
    x_prods = (x_prods - sort_arr[norm_coefs[1]]) / prod_dev
    # Threshold calculation - per-feature min from the requested percentile,
    # max capped at 5x the 99th percentile of the fitted gamma distribution
    amp_min = inverse_gamma_cdf(threshold,
                                constants['AMP_KS'][band_idx],
                                constants['AMP_THETAS'][band_idx],
                                constants['AMP_OFFSETS'][band_idx])
    amp_max = 5 * inverse_gamma_cdf(.99,
                                    constants['AMP_KS'][band_idx],
                                    constants['AMP_THETAS'][band_idx],
                                    constants['AMP_OFFSETS'][band_idx])
    fhom_min = inverse_gamma_cdf(threshold,
                                 constants['FHOM_KS'][band_idx],
                                 constants['FHOM_THETAS'][band_idx],
                                 constants['FHOM_OFFSETS'][band_idx])
    fhom_max = 5 * inverse_gamma_cdf(.99,
                                     constants['FHOM_KS'][band_idx],
                                     constants['FHOM_THETAS'][band_idx],
                                     constants['FHOM_OFFSETS'][band_idx])
    prod_min = inverse_gamma_cdf(threshold,
                                 constants['PROD_KS'][band_idx],
                                 constants['PROD_THETAS'][band_idx],
                                 constants['PROD_OFFSETS'][band_idx])
    prod_max = 5 * inverse_gamma_cdf(.99,
                                     constants['PROD_KS'][band_idx],
                                     constants['PROD_THETAS'][band_idx],
                                     constants['PROD_OFFSETS'][band_idx])
    dur_min = inverse_gamma_cdf(threshold,
                                constants['DUR_KS'][band_idx],
                                constants['DUR_THETAS'][band_idx],
                                constants['DUR_OFFSETS'][band_idx])
    dur_max = 5 * inverse_gamma_cdf(.99,
                                    constants['DUR_KS'][band_idx],
                                    constants['DUR_THETAS'][band_idx],
                                    constants['DUR_OFFSETS'][band_idx])
    # Detect - scan for runs where the product stays above edge_thresh,
    # then accept the run only if all four features fall inside [min, max]
    j = 0
    while j < len(x):
        if x_prods[j] > edge_thresh:
            event_start = j
            j += 1
            while j < len(x) and x_prods[j] > edge_thresh:
                j += 1
            event_stop = j
            #Calculate duration
            dur = float(event_stop - event_start + 1) / fs
            if (dur < dur_min) or (dur > dur_max):
                j += 1
                continue
            # Features from short events are scaled down relative to the
            # nominal detection window length
            dur_scale = np.sqrt(dur / wind_secs)
            #Calculate amplitude
            amp = np.mean(x_amps[event_start:event_stop])
            amp = amp * dur_scale
            if (amp < amp_min) or (amp > amp_max):
                j += 1
                continue
            #Calculate fhom
            fhom = np.mean(x_fhoms[event_start:event_stop])
            fhom = fhom * dur_scale
            if (fhom < fhom_min) or (fhom > fhom_max):
                j += 1
                continue
            #Calculate product
            prod = np.mean(x_prods[event_start:event_stop])
            prod = prod * dur_scale
            if (prod < prod_min) or (prod > prod_max):
                j += 1
                continue
            conglom_band[event_start:event_stop] = 1
            #Put in output-df
            df_out.loc[df_i] = [event_start, event_stop,
                                constants['BAND_STARTS'][band_idx],
                                constants['BAND_STOPS'][band_idx],
                                amp, fhom, dur, prod,
                                'band']
            df_i += 1
        else:
            j += 1
    return [df_out, conglom_band]
def inverse_gamma_cdf(p, k, theta, offset):
    """
    Inverse gamma cumulative distribution function.

    Returns the value at which a gamma distribution with shape ``k``,
    scale ``theta`` and location ``offset`` reaches cumulative
    probability ``p``.
    """
    return (gammaincinv(k, p) * theta) + offset
def sliding_snr(np_x,bp_x,Fs,wind_secs):
    """
    "Signal-to-noise ratio" like metric that compares narrow band and broad
    band signals to eliminate increased power generated by sharp transients.
    Parameters:
    -----------
    np_x - narrow band signal\n
    bp_x - broad band signal\n
    fs - sampling frequency\n
    wind_secs - sliding window size (seconds)\n
    Returns:
    --------
    snr - "Signal-to-noise ratio" like metric

    NOTE(review): the vectorized middle section assigns the plain power
    ratio npxx_sig / bpxx_sig, while both the leading-edge fill and the
    retained reference implementation below use the RMS ratio
    sqrt(npxx) / sqrt(bpxx). The middle values are therefore the square
    of the edge convention - confirm whether this is intentional (the
    detector's gamma constants may be calibrated to this behavior).
    """
    #Define starting values
    wind = Fs*wind_secs
    half_wind = int(round(wind/2))
    wind = int(round(wind))
    # Output has one value per sample of the shorter input
    N = min([len(np_x),len(bp_x)])
    snr=np.zeros([N])
    npxx = 0
    bpxx = 0
    #Fill in the beginning and initial window values
    # npxx: narrow-band power; bpxx: power of the (broad - narrow) residual
    for i in range(int(wind)):
        t1 = np_x[i]
        npxx = npxx + (t1 * t1)
        t2 = bp_x[i] - t1
        bpxx = bpxx + (t2 * t2)
    np_rms = np.sqrt(float(npxx) / wind)
    bp_rms = np.sqrt(float(bpxx) / wind)
    snr[:half_wind] = (np_rms / bp_rms)
    #Slide the window
    # Running sums via cumulative sums of per-sample power differences;
    # the odd/even split keeps the slice lengths aligned with the output.
    np_x_sqr = np.square(np_x)
    np_bp_x_diff_sqr = np.square(bp_x - np_x)
    if wind%2:
        np_x_sqr_diffs = np_x_sqr[2*half_wind:] - np_x_sqr[1:-((2*half_wind)-1)]
        np_bp_x_diff_sqr_diffs = np_bp_x_diff_sqr[2*half_wind:] - np_bp_x_diff_sqr[1:-((2*half_wind)-1)]
    else:
        np_x_sqr_diffs = np_x_sqr[2*half_wind-1:-1] - np_x_sqr[1:-((2*half_wind)-1)]
        np_bp_x_diff_sqr_diffs = np_bp_x_diff_sqr[2*half_wind-1:-1] - np_bp_x_diff_sqr[1:-((2*half_wind)-1)]
    npxx_sig = np.cumsum(np_x_sqr_diffs)+npxx
    bpxx_sig = np.cumsum(np_bp_x_diff_sqr_diffs)+bpxx
    snr[half_wind:-half_wind] = npxx_sig / bpxx_sig
    # ----- Original code -----
    # Slide the window
    # i = 1
    # for k in range(int(N-wind+1)):
    #     p = k + wind - 1
    #
    #     #Beginning of the window
    #     t1 = np_x[i]
    #     npxx = npxx - (t1 * t1)
    #     t2 = bp_x[i] - t1
    #     bpxx = bpxx - (t2 * t2)
    #
    #     #End of the window
    #     t1 = np_x[p]
    #     npxx = npxx + (t1 * t1)
    #     t2 = bp_x[p] - t1
    #     bpxx = bpxx + (t2 * t2)
    #
    #     np_rms = np.sqrt(float(npxx) / wind) # Unnecessary to divide by wind
    #     bp_rms = np.sqrt(float(bpxx) / wind)
    #
    #     snr[k+half_wind] = (np_rms/bp_rms)
    #
    #     i += 1
    # ----- -----
    #Fill in the end - extend the last computed value to the trailing edge
    snr[-half_wind:] = snr[-(half_wind+1)]
    return snr
| bsd-3-clause |
ercius/openNCEM | setup.py | 1 | 4143 | """A setuptools based setup module.
See https://packaging.python.org/en/latest/distributing.html
Addapted from https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from codecs import open
from os import path
from setuptools import setup , find_packages
# To use a consistent encoding
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'ncempy/long_description.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ncempy',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.8.1',
description='openNCEM\'s Python Package',
long_description=long_description,
# The project's main homepage.
url='https://github.com/ercius/openNCEM',
# Author details
author='P. Ercius, F. Niekiel, C. Ophus, T. Pekin, Z. Gainsforth',
author_email='percius@lbl.gov',
# Choose your license
license='GPLv3+, MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
# What does your project relate to?
keywords='electron microscopy image diffraction processing simulation',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'test', 'data']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'scipy', 'matplotlib', 'h5py>=2.9.0'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'edstomo': ['glob2', 'genfire', 'hyperspy', 'scikit-image', 'scipy', 'matplotlib', 'ipyvolume']
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'edstomo': ['Elam/ElamDB12.txt'],
# },
include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('ncempy', ['ncempy/edstomo/Elam/ElamDB12.txt'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'ncem2png=ncempy.command_line.ncem2png:main',
],
},
)
| gpl-3.0 |
mesnardo/PetIBM | examples/decoupledibpm/cylinder2dRe550_GPU/scripts/plotVorticity.py | 3 | 1384 | """
Computes, plots, and saves the 2D vorticity field from a PetIBM simulation
after 1200 time steps (3 non-dimensional time-units).
"""
import pathlib
import h5py
import numpy
from matplotlib import pyplot
# The simulation directory sits one level above this scripts/ folder.
case_dir = pathlib.Path(__file__).absolute().parents[1]

# --- Load the 2D vorticity field and its grid from the HDF5 output ---
field = 'wz'
grid_file = h5py.File(case_dir / 'grid.h5', 'r')
x_coords, y_coords = grid_file[field]['x'][:], grid_file[field]['y'][:]
X, Y = numpy.meshgrid(x_coords, y_coords)
step = 1200
data_file = h5py.File(case_dir / 'solution' / '{:0>7}.h5'.format(step), 'r')
wz = data_file[field][:]

# --- Load the body coordinates (header line skipped) ---
with open(case_dir / 'circle.body', 'r') as infile:
    xb, yb = numpy.loadtxt(infile, dtype=numpy.float64,
                           unpack=True, skiprows=1)

pyplot.rc('font', family='serif', size=16)

# --- Plot the vorticity contours together with the body outline ---
fig, ax = pyplot.subplots(figsize=(6.0, 6.0))
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
contour_levels = numpy.linspace(-32.0, 32.0, 32)
ax.contour(X, Y, wz, levels=contour_levels, colors='black')
ax.plot(xb, yb, color='red')
ax.set_xlim(-0.6, 1.6)
ax.set_ylim(-0.8, 0.8)
ax.set_aspect('equal')
fig.tight_layout()
pyplot.show()

# --- Save the figure under <case>/figures/ ---
out_dir = case_dir / 'figures'
out_dir.mkdir(parents=True, exist_ok=True)
fig.savefig(str(out_dir / 'wz{:0>7}.png'.format(step)), dpi=300)
niamoto/niamoto-core | niamoto/data_providers/plantnote_provider/plantnote_occurrence_provider.py | 1 | 4162 | # coding: utf-8
from sqlalchemy import *
import pandas as pd
from niamoto.data_providers.base_occurrence_provider import \
BaseOccurrenceProvider
class PlantnoteOccurrenceProvider(BaseOccurrenceProvider):
    """
    Pl@ntnote Occurrence Provider.
    Provide occurrences from a Pl@ntnote database. The Pl@ntnote database
    must have previously been converted to a SQLite3 database.
    """

    def __init__(self, data_provider, plantnote_db_path):
        """
        :param data_provider: The data provider this occurrence provider
            is attached to.
        :param plantnote_db_path: Path to the Pl@ntnote SQLite3 database
            file.
        """
        super(PlantnoteOccurrenceProvider, self).__init__(data_provider)
        self.plantnote_db_path = plantnote_db_path

    def get_provider_occurrence_dataframe(self):
        """
        Query the Pl@ntnote SQLite3 database and return the occurrence
        dataframe, indexed by occurrence id. Besides 'taxon_id' and
        'location' (an EWKT point built from the LongDD/LatDD columns),
        every remaining attribute is serialized into a JSON 'properties'
        column.
        """
        db_str = 'sqlite:///{}'.format(self.plantnote_db_path)
        eng = create_engine(db_str)
        connection = eng.connect()
        # Fix: the previous version wrapped this body in a no-op bare
        # ``except: raise`` clause; a plain try/finally gives the same
        # cleanup guarantees. The old ``if connection:`` guard was dead
        # code too, since ``eng.connect()`` either returns a connection
        # or raises before the ``try`` is entered.
        try:
            metadata = MetaData()
            metadata.reflect(eng)
            # Needed tables
            occ_table = metadata.tables['Individus']
            obs_table = metadata.tables['Observations']
            det_table = metadata.tables['DΓ©terminations']
            inv_table = metadata.tables['Inventaires']
            loc_table = metadata.tables['LocalitΓ©s']
            # Id columns for joining
            id_occ_obs = occ_table.c["ID Observations"]
            id_obs = obs_table.c["ID Observations"]
            id_occ_det = occ_table.c["ID DΓ©terminations"]
            id_det = det_table.c["ID DΓ©terminations"]
            id_occ_inv = occ_table.c["ID Inventaires"]
            id_inv = inv_table.c["ID Inventaires"]
            id_inv_loc = inv_table.c["ID Parcelle"]
            id_loc = loc_table.c["ID LocalitΓ©s"]
            # EWKT point string, e.g. "SRID=4326;POINT(lon lat)".
            loc_col = "SRID=4326;POINT(" + \
                      type_coerce(loc_table.c["LongDD"], String) + \
                      ' ' + \
                      type_coerce(loc_table.c["LatDD"], String) + \
                      ')'
            sel = select([
                occ_table.c["ID Individus"].label('id'),
                func.coalesce(
                    det_table.c["ID Taxons"],
                    None
                ).label('taxon_id'),
                loc_col.label("location"),
                occ_table.c["Dominance"].label("strata"),
                occ_table.c["wood_density"].label("wood_density"),
                occ_table.c["leaves_sla"].label("leaves_sla"),
                occ_table.c["bark_thickness"].label("bark_thickness"),
                func.coalesce(obs_table.c["DBH"], None).label("dbh"),
                func.coalesce(obs_table.c["hauteur"], None).label("height"),
                func.coalesce(obs_table.c["nb_tiges"], None).label("stem_nb"),
                func.coalesce(obs_table.c["statut"], None).label("status"),
                obs_table.c["date_observation"],
            ]).select_from(
                occ_table.outerjoin(
                    obs_table,
                    id_occ_obs == id_obs
                ).outerjoin(
                    det_table,
                    id_occ_det == id_det
                ).outerjoin(
                    inv_table,
                    id_occ_inv == id_inv
                ).join(
                    loc_table,
                    id_inv_loc == id_loc
                )
            ).order_by(
                # Most recent observation/determination first, so the
                # group_by below keeps the latest record per individual.
                obs_table.c["date_observation"].desc(),
                det_table.c["Date DΓ©termination"].desc(),
            ).group_by(
                occ_table.c["ID Individus"],
            )
            df = pd.read_sql(sel, connection, index_col="id")
            # Collapse the secondary attributes into one JSON column.
            property_cols = [
                "strata",
                "wood_density",
                "leaves_sla",
                "bark_thickness",
                "dbh",
                "height",
                "stem_nb",
                "status",
                "date_observation",
            ]
            properties = df[property_cols].apply(
                lambda x: x.to_json(force_ascii=False),
                axis=1
            )
            df.drop(property_cols, axis=1, inplace=True)
            df['properties'] = properties
            return df
        finally:
            connection.close()
            eng.dispose()
| gpl-3.0 |
balazsdukai/GEO1005-StormManager | SpatialDecision/external/networkx/convert_matrix.py | 7 | 33333 | """Functions to convert NetworkX graphs to and from numpy/scipy matrices.
The preferred way of converting data to a NetworkX graph is through the
graph constuctor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D = nx.DiGraph(a)
or equivalently
>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())
See Also
--------
nx_pygraphviz, nx_pydot
"""
# Copyright (C) 2006-2014 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import warnings
import itertools
import networkx as nx
from networkx.convert import _prep_create_using
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['from_numpy_matrix', 'to_numpy_matrix',
'from_pandas_dataframe', 'to_pandas_dataframe',
'to_numpy_recarray',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
def to_pandas_dataframe(G, nodelist=None, multigraph_weight=sum, weight='weight', nonedge=0.0):
    """Return the graph adjacency matrix as a Pandas DataFrame.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the Pandas DataFrame.

    nodelist : list, optional
        The rows and columns are ordered according to the nodes in `nodelist`.
        If `nodelist` is None, then the ordering is produced by G.nodes().

    multigraph_weight : {sum, min, max}, optional
        An operator that determines how weights in multigraphs are handled.
        The default is to sum the weights of the multiple edges.

    weight : string or None, optional
        The edge attribute that holds the numerical value used for
        the edge weight. If an edge does not have that attribute, then the
        value 1 is used instead.

    nonedge : float, optional
        The matrix values corresponding to nonedges are typically set to zero.
        However, this could be undesirable if there are matrix values
        corresponding to actual edges that also have the value zero. If so,
        one might prefer nonedges to have some other value, such as nan.

    Returns
    -------
    df : Pandas DataFrame
        Graph adjacency matrix

    Notes
    -----
    The DataFrame entries are assigned to the weight edge attribute. When
    an edge does not have a weight attribute, the value of the entry is set to
    the number 1. For multiple (parallel) edges, the values of the entries
    are determined by the 'multigraph_weight' parameter. The default is to
    sum the weight attributes for each of the parallel edges.

    When `nodelist` does not contain every node in `G`, the matrix is built
    from the subgraph of `G` that is induced by the nodes in `nodelist`.

    The convention used for self-loop edges in graphs is to assign the
    diagonal matrix entry value to the weight attribute of the edge
    (or the number 1 if the edge has no weight attribute). If the
    alternate convention of doubling the edge weight is desired the
    resulting Pandas DataFrame can be modified as follows:

    >>> import pandas as pd
    >>> import numpy as np
    >>> G = nx.Graph([(1,1)])
    >>> df = nx.to_pandas_dataframe(G)
    >>> df
       1
    1  1
    >>> df.values[np.diag_indices_from(df)] *= 2
    >>> df
       1
    1  2

    Examples
    --------
    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(0,1,weight=2)
    >>> G.add_edge(1,0)
    >>> G.add_edge(2,2,weight=3)
    >>> G.add_edge(2,2)
    >>> nx.to_pandas_dataframe(G, nodelist=[0,1,2])
       0  1  2
    0  0  2  0
    1  1  0  0
    2  0  0  4
    """
    import pandas as pd
    # Delegate matrix construction (including duplicate-nodelist checks and
    # multigraph handling) to to_numpy_matrix.
    M = to_numpy_matrix(G, nodelist, None, None, multigraph_weight,
                        weight, nonedge)
    if nodelist is None:
        nodelist = G.nodes()
    # Fix: removed the unused ``nodeset = set(nodelist)`` local; the
    # duplicate check already happens inside to_numpy_matrix.
    df = pd.DataFrame(data=M, index=nodelist, columns=nodelist)
    return df
def from_pandas_dataframe(df, source, target, edge_attr=None,
                          create_using=None):
    """Return a graph from Pandas DataFrame.

    The Pandas DataFrame should contain at least two columns of node names and
    zero or more columns of node attributes. Each row will be processed as one
    edge instance.

    Note: This function iterates over DataFrame.values, which is not
    guaranteed to retain the data type across columns in the row. This is only
    a problem if your row is entirely numeric and a mix of ints and floats. In
    that case, all values will be returned as floats. See the
    DataFrame.iterrows documentation for an example.

    Parameters
    ----------
    df : Pandas DataFrame
        An edge list representation of a graph

    source : str or int
        A valid column name (string or iteger) for the source nodes (for the
        directed case).

    target : str or int
        A valid column name (string or iteger) for the target nodes (for the
        directed case).

    edge_attr : str or int, iterable, True
        A valid column name (str or integer) or list of column names that will
        be used to retrieve items from the row and add them to the graph as edge
        attributes. If `True`, all of the remaining columns will be added.

    create_using : NetworkX graph
        Use specified graph for result. The default is Graph()

    See Also
    --------
    to_pandas_dataframe

    Examples
    --------
    Simple integer weights on edges:

    >>> import pandas as pd
    >>> import numpy as np
    >>> r = np.random.RandomState(seed=5)
    >>> ints = r.random_integers(1, 10, size=(3,2))
    >>> a = ['A', 'B', 'C']
    >>> b = ['D', 'A', 'E']
    >>> df = pd.DataFrame(ints, columns=['weight', 'cost'])
    >>> df[0] = a
    >>> df['b'] = b
    >>> df
       weight  cost  0  b
    0       4     7  A  D
    1       7     1  B  A
    2      10     9  C  E
    >>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])
    >>> G['E']['C']['weight']
    10
    >>> G['E']['C']['cost']
    9
    """
    g = _prep_create_using(create_using)

    # Positional indices of the source and target columns.
    src_i = df.columns.get_loc(source)
    tar_i = df.columns.get_loc(target)
    if edge_attr:
        # Build a list of (attribute name, column index) pairs.
        if edge_attr is True:
            # Take every column except the two node columns.
            # Fix: this used identity comparison (``col is not source``),
            # which only works when labels happen to be interned; equal but
            # distinct label objects would wrongly be kept as attributes.
            edge_i = [(col, i) for i, col in enumerate(df.columns)
                      if col != source and col != target]
        # If a list or tuple of names is requested
        elif isinstance(edge_attr, (list, tuple)):
            edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]
        # If a single string or int is passed
        else:
            edge_i = [(edge_attr, df.columns.get_loc(edge_attr))]

        # Iteration on values returns the rows as Numpy arrays.
        for row in df.values:
            g.add_edge(row[src_i], row[tar_i], {i: row[j] for i, j in edge_i})

    # If no column names are given, then just return the edges.
    else:
        for row in df.values:
            g.add_edge(row[src_i], row[tar_i])

    return g
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
                    multigraph_weight=sum, weight='weight', nonedge=0.0):
    """Return the graph adjacency matrix as a NumPy matrix.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the NumPy matrix.

    nodelist : list, optional
        The rows and columns are ordered according to the nodes in ``nodelist``.
        If ``nodelist`` is None, then the ordering is produced by G.nodes().

    dtype : NumPy data type, optional
        A valid single NumPy data type used to initialize the array.
        This must be a simple type such as int or numpy.float64 and
        not a compound data type (see to_numpy_recarray)
        If None, then the NumPy default is used.

    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. If None, then the NumPy default
        is used.

    multigraph_weight : {sum, min, max}, optional
        An operator that determines how weights in multigraphs are handled.
        The default is to sum the weights of the multiple edges.

    weight : string or None optional (default = 'weight')
        The edge attribute that holds the numerical value used for
        the edge weight. If an edge does not have that attribute, then the
        value 1 is used instead.

    nonedge : float (default = 0.0)
        The matrix values corresponding to nonedges are typically set to zero.
        However, this could be undesirable if there are matrix values
        corresponding to actual edges that also have the value zero. If so,
        one might prefer nonedges to have some other value, such as nan.

    Returns
    -------
    M : NumPy matrix
        Graph adjacency matrix

    See Also
    --------
    to_numpy_recarray, from_numpy_matrix

    Notes
    -----
    The matrix entries are assigned to the weight edge attribute. When
    an edge does not have a weight attribute, the value of the entry is set to
    the number 1. For multiple (parallel) edges, the values of the entries
    are determined by the ``multigraph_weight`` parameter. The default is to
    sum the weight attributes for each of the parallel edges.

    When ``nodelist`` does not contain every node in ``G``, the matrix is built
    from the subgraph of ``G`` that is induced by the nodes in ``nodelist``.

    The convention used for self-loop edges in graphs is to assign the
    diagonal matrix entry value to the weight attribute of the edge
    (or the number 1 if the edge has no weight attribute). If the
    alternate convention of doubling the edge weight is desired the
    resulting Numpy matrix can be modified as follows:

    >>> import numpy as np
    >>> G = nx.Graph([(1, 1)])
    >>> A = nx.to_numpy_matrix(G)
    >>> A
    matrix([[ 1.]])
    >>> A.A[np.diag_indices_from(A)] *= 2
    >>> A
    matrix([[ 2.]])

    Examples
    --------
    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(0,1,weight=2)
    >>> G.add_edge(1,0)
    >>> G.add_edge(2,2,weight=3)
    >>> G.add_edge(2,2)
    >>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
    matrix([[ 0.,  2.,  0.],
            [ 1.,  0.,  0.],
            [ 0.,  0.,  4.]])
    """
    import numpy as np
    if nodelist is None:
        nodelist = G.nodes()
    nodeset = set(nodelist)
    if len(nodelist) != len(nodeset):
        msg = "Ambiguous ordering: `nodelist` contained duplicates."
        raise nx.NetworkXError(msg)
    nlen = len(nodelist)
    undirected = not G.is_directed()
    # Map each node to its row/column position in the matrix.
    index = dict(zip(nodelist, range(nlen)))

    # The matrix starts out filled with nan rather than zero, for two
    # reasons: (1) a real edge with weight 0 must stay distinguishable from
    # a nonedge, and (2) for multi(di)graphs the nan-aware combinators
    # below (nansum/nanmin/nanmax) treat nan as "no value yet", which an
    # initial 0 would break for min and max. Leftover nans are replaced by
    # `nonedge` at the end.
    if G.is_multigraph():
        # Handle MultiGraphs and MultiDiGraphs
        M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
        # use numpy nan-aware operations
        operator = {sum: np.nansum, min: np.nanmin, max: np.nanmax}
        try:
            op = operator[multigraph_weight]
        except KeyError:
            # Fix: this was a bare ``except``, which also swallowed
            # KeyboardInterrupt/SystemExit; only an unknown combinator
            # should be reported as a ValueError.
            raise ValueError('multigraph_weight must be sum, min, or max')
        for u, v, attrs in G.edges_iter(data=True):
            if (u in nodeset) and (v in nodeset):
                i, j = index[u], index[v]
                e_weight = attrs.get(weight, 1)
                # Combine parallel edges with the chosen operator.
                M[i, j] = op([e_weight, M[i, j]])
                if undirected:
                    M[j, i] = M[i, j]
    else:
        # Graph or DiGraph, this is much faster than above
        M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
        for u, nbrdict in G.adjacency_iter():
            for v, d in nbrdict.items():
                try:
                    M[index[u], index[v]] = d.get(weight, 1)
                except KeyError:
                    # This occurs when there are fewer desired nodes than
                    # there are nodes in the graph: len(nodelist) < len(G)
                    pass

    M[np.isnan(M)] = nonedge
    M = np.asmatrix(M)
    return M
def from_numpy_matrix(A, parallel_edges=False, create_using=None):
    """Return a graph from numpy matrix.

    The numpy matrix is interpreted as an adjacency matrix for the graph.

    Parameters
    ----------
    A : numpy matrix
        An adjacency matrix representation of a graph

    parallel_edges : Boolean
        If this is ``True``, ``create_using`` is a multigraph, and ``A`` is an
        integer matrix, then entry *(i, j)* in the matrix is interpreted as the
        number of parallel edges joining vertices *i* and *j* in the graph. If it
        is ``False``, then the entries in the adjacency matrix are interpreted as
        the weight of a single edge joining the vertices.

    create_using : NetworkX graph
        Use specified graph for result. The default is Graph()

    Notes
    -----
    If ``create_using`` is an instance of :class:`networkx.MultiGraph` or
    :class:`networkx.MultiDiGraph`, ``parallel_edges`` is ``True``, and the
    entries of ``A`` are of type ``int``, then this function returns a multigraph
    (of the same type as ``create_using``) with parallel edges.

    If ``create_using`` is an undirected multigraph, then only the edges
    indicated by the upper triangle of the matrix `A` will be added to the
    graph.

    If the numpy matrix has a single data type for each matrix entry it
    will be converted to an appropriate Python data type.

    If the numpy matrix has a user-specified compound data type the names
    of the data fields will be used as attribute keys in the resulting
    NetworkX graph.

    See Also
    --------
    to_numpy_matrix, to_numpy_recarray

    Examples
    --------
    Simple integer weights on edges:

    >>> import numpy
    >>> A=numpy.matrix([[1, 1], [2, 1]])
    >>> G=nx.from_numpy_matrix(A)

    If ``create_using`` is a multigraph and the matrix has only integer entries,
    the entries will be interpreted as weighted edges joining the vertices
    (without creating parallel edges):

    >>> import numpy
    >>> A = numpy.matrix([[1, 1], [1, 2]])
    >>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())
    >>> G[1][1]
    {0: {'weight': 2}}

    If ``create_using`` is a multigraph and the matrix has only integer entries
    but ``parallel_edges`` is ``True``, then the entries will be interpreted as
    the number of parallel edges joining those two vertices:

    >>> import numpy
    >>> A = numpy.matrix([[1, 1], [1, 2]])
    >>> temp = nx.MultiGraph()
    >>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)
    >>> G[1][1]
    {0: {'weight': 1}, 1: {'weight': 1}}

    User defined compound data type on edges:

    >>> import numpy
    >>> dt = [('weight', float), ('cost', int)]
    >>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)
    >>> G = nx.from_numpy_matrix(A)
    >>> G.edges()
    [(0, 0)]
    >>> G[0][0]['cost']
    2
    >>> G[0][0]['weight']
    1.0
    """
    # This should never fail if you have created a numpy matrix with numpy...
    import numpy as np
    # Map numpy dtype kinds to the Python types used for edge attributes.
    kind_to_python_type = {'f': float,
                           'i': int,
                           'u': int,
                           'b': bool,
                           'c': complex,
                           'S': str,
                           'V': 'void'}
    try:  # Python 3.x
        blurb = chr(1245)  # just to trigger the exception
        kind_to_python_type['U'] = str
    except ValueError:  # Python 2.6+
        kind_to_python_type['U'] = unicode
    G = _prep_create_using(create_using)
    n, m = A.shape
    if n != m:
        raise nx.NetworkXError("Adjacency matrix is not square.",
                               "nx,ny=%s" % (A.shape,))
    dt = A.dtype
    try:
        python_type = kind_to_python_type[dt.kind]
    except KeyError:
        # Fix: this was a bare ``except``; only an unknown dtype kind
        # should be reported as a TypeError.
        raise TypeError("Unknown numpy data type: %s" % dt)

    # Make sure we get even the isolated nodes of the graph.
    G.add_nodes_from(range(n))
    # Get a list of all the entries in the matrix with nonzero entries. These
    # coordinates will become the edges in the graph.
    edges = zip(*(np.asarray(A).nonzero()))
    # handle numpy constructed data type
    # Fix: this was ``python_type is 'void'`` -- an identity comparison
    # against a string literal that only works via CPython interning and
    # raises a SyntaxWarning on modern interpreters; use equality.
    if python_type == 'void':
        # Sort the fields by their offset, then by dtype, then by name.
        fields = sorted((offset, dtype, name) for name, (dtype, offset) in
                        A.dtype.fields.items())
        triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)
                           for (_, dtype, name), val in zip(fields, A[u, v])})
                   for u, v in edges)
    # If the entries in the adjacency matrix are integers, the graph is a
    # multigraph, and parallel_edges is True, then create parallel edges, each
    # with weight 1, for each entry in the adjacency matrix. Otherwise, create
    # one edge for each positive entry in the adjacency matrix and set the
    # weight of that edge to be the entry in the matrix.
    elif python_type is int and G.is_multigraph() and parallel_edges:
        chain = itertools.chain.from_iterable
        # The following line is equivalent to:
        #
        #     for (u, v) in edges:
        #         for d in range(A[u, v]):
        #             G.add_edge(u, v, weight=1)
        #
        triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))
                        for (u, v) in edges)
    else:  # basic data type
        triples = ((u, v, dict(weight=python_type(A[u, v])))
                   for u, v in edges)
    # If we are creating an undirected multigraph, only add the edges from the
    # upper triangle of the matrix. Otherwise, add all the edges. This relies
    # on the fact that the vertices created in the
    # ``_generated_weighted_edges()`` function are actually the row/column
    # indices for the matrix ``A``.
    #
    # Without this check, we run into a problem where each edge is added twice
    # when ``G.add_edges_from()`` is invoked below.
    if G.is_multigraph() and not G.is_directed():
        triples = ((u, v, d) for u, v, d in triples if u <= v)
    G.add_edges_from(triples)
    return G
@not_implemented_for('multigraph')
def to_numpy_recarray(G,nodelist=None,
                      dtype=[('weight',float)],
                      order=None):
    """Return the graph adjacency matrix as a NumPy recarray.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the NumPy matrix.
    nodelist : list, optional
        The rows and columns are ordered according to the nodes in `nodelist`.
        If `nodelist` is None, then the ordering is produced by G.nodes().
    dtype : NumPy data-type, optional
        A valid NumPy named dtype used to initialize the NumPy recarray.
        The data type names are assumed to be keys in the graph edge attribute
        dictionary.
    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. If None, then the NumPy default
        is used.

    Returns
    -------
    M : NumPy recarray
        The graph with specified edge data as a Numpy recarray

    Notes
    -----
    When `nodelist` does not contain every node in `G`, the matrix is built
    from the subgraph of `G` that is induced by the nodes in `nodelist`.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_edge(1,2,weight=7.0,cost=5)
    >>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
    >>> print(A.weight)
    [[ 0.  7.]
     [ 7.  0.]]
    >>> print(A.cost)
    [[0 5]
     [5 0]]
    """
    import numpy as np
    if nodelist is None:
        nodelist = G.nodes()
    node_set = set(nodelist)
    if len(node_set) != len(nodelist):
        raise nx.NetworkXError(
            "Ambiguous ordering: `nodelist` contained duplicates.")
    n = len(nodelist)
    symmetric = not G.is_directed()
    # Position of each node in the matrix row/column ordering.
    position = {node: i for i, node in enumerate(nodelist)}
    M = np.zeros((n, n), dtype=dtype, order=order)
    field_names = M.dtype.names
    for u, v, attrs in G.edges_iter(data=True):
        if u in node_set and v in node_set:
            row, col = position[u], position[v]
            M[row, col] = tuple(attrs[name] for name in field_names)
            if symmetric:
                M[col, row] = M[row, col]
    return M.view(np.recarray)
def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
                           weight='weight', format='csr'):
    """Return the graph adjacency matrix as a SciPy sparse matrix.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the sparse matrix.
    nodelist : list, optional
        The rows and columns are ordered according to the nodes in `nodelist`.
        If `nodelist` is None, then the ordering is produced by G.nodes().
    dtype : NumPy data-type, optional
        A valid NumPy dtype used to initialize the array. If None, then the
        NumPy default is used.
    weight : string or None optional (default='weight')
        The edge attribute that holds the numerical value used for
        the edge weight. If None then all edge weights are 1.
    format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
        The type of the matrix to be returned (default 'csr').

    Returns
    -------
    M : SciPy sparse matrix
        Graph adjacency matrix.

    Notes
    -----
    Entries come from the edge attribute named by `weight`; edges lacking
    that attribute contribute the value 1. For multiple (parallel) edges
    the matrix values are the sums of the edge weights. When `nodelist`
    does not contain every node in `G`, the matrix is built from the
    induced subgraph. Self-loop edges put the edge weight on the diagonal
    once; use ``A.setdiag(A.diagonal()*2)`` afterwards if the doubled
    convention is wanted.

    References
    ----------
    .. [1] Scipy Dev. References, "Sparse Matrices",
       http://docs.scipy.org/doc/scipy/reference/sparse.html
    """
    from scipy import sparse
    if nodelist is None:
        nodelist = G
    nlen = len(nodelist)
    if nlen == 0:
        raise nx.NetworkXError("Graph has no nodes or edges")
    if len(nodelist) != len(set(nodelist)):
        raise nx.NetworkXError(
            "Ambiguous ordering: `nodelist` contained duplicates.")
    index = dict(zip(nodelist, range(nlen)))
    if G.number_of_edges() == 0:
        row, col, data = [], [], []
    else:
        # Edge triples restricted to the requested nodes.
        triples = ((index[u], index[v], attrs.get(weight, 1))
                   for u, v, attrs in G.edges_iter(nodelist, data=True)
                   if u in index and v in index)
        row, col, data = zip(*triples)
    if G.is_directed():
        M = sparse.coo_matrix((data, (row, col)),
                              shape=(nlen, nlen), dtype=dtype)
    else:
        # Symmetrize by emitting each edge in both directions ...
        d = data + data
        r = row + col
        c = col + row
        # ... which double-counts self-loops, so subtract the diagonal
        # contribution once per self-loop.
        selfloops = G.selfloop_edges(data=True)
        if selfloops:
            diag_index, diag_data = zip(*((index[u], -attrs.get(weight, 1))
                                          for u, v, attrs in selfloops
                                          if u in index and v in index))
            d += diag_data
            r += diag_index
            c += diag_index
        M = sparse.coo_matrix((d, (r, c)), shape=(nlen, nlen), dtype=dtype)
    try:
        return M.asformat(format)
    except AttributeError:
        raise nx.NetworkXError("Unknown sparse matrix format: %s" % format)
def _csr_gen_triples(A):
    """Yield the (row, column, weight) triples of a SciPy sparse matrix in
    **Compressed Sparse Row** format.
    """
    values, col_indices, row_ptr = A.data, A.indices, A.indptr
    for row in range(A.shape[0]):
        for k in range(row_ptr[row], row_ptr[row + 1]):
            yield row, col_indices[k], values[k]
def _csc_gen_triples(A):
    """Yield the (row, column, weight) triples of a SciPy sparse matrix in
    **Compressed Sparse Column** format.
    """
    values, row_indices, col_ptr = A.data, A.indices, A.indptr
    for col in range(A.shape[1]):
        for k in range(col_ptr[col], col_ptr[col + 1]):
            yield row_indices[k], col, values[k]
def _coo_gen_triples(A):
    """Return an iterable of (row, column, weight) triples for a SciPy
    sparse matrix in **Coordinate** format.
    """
    return zip(A.row, A.col, A.data)
def _dok_gen_triples(A):
    """Return an iterable of (row, column, weight) triples for a SciPy
    sparse matrix in **Dictionary of Keys** format.
    """
    return ((row, col, value) for (row, col), value in A.items())
def _generate_weighted_edges(A):
    """Returns an iterable over (u, v, w) triples, where u and v are adjacent
    vertices and w is the weight of the edge joining u and v.
    `A` is a SciPy sparse matrix (in any format).
    """
    # Formats with a dedicated extractor; everything else (including COO
    # itself) goes through a COO conversion, which is a no-op for COO input.
    direct_handlers = {
        'csr': _csr_gen_triples,
        'csc': _csc_gen_triples,
        'dok': _dok_gen_triples,
    }
    handler = direct_handlers.get(A.format)
    if handler is not None:
        return handler(A)
    return _coo_gen_triples(A.tocoo())
def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,
                             edge_attribute='weight'):
    """Creates a new graph from an adjacency matrix given as a SciPy sparse
    matrix.
    Parameters
    ----------
    A: scipy sparse matrix
      An adjacency matrix representation of a graph
    parallel_edges : Boolean
      If this is ``True``, `create_using` is a multigraph, and `A` is an
      integer matrix, then entry *(i, j)* in the matrix is interpreted as the
      number of parallel edges joining vertices *i* and *j* in the graph. If it
      is ``False``, then the entries in the adjacency matrix are interpreted as
      the weight of a single edge joining the vertices.
    create_using: NetworkX graph
      Use specified graph for result.  The default is Graph()
    edge_attribute: string
      Name of edge attribute to store matrix numeric value. The data will
      have the same type as the matrix entry (int, float, (real,imag)).
    Returns
    -------
    G : NetworkX graph
      Graph with one node per row/column index of `A` (including isolated
      nodes) and one weighted edge per nonzero entry.
    Raises
    ------
    NetworkXError
      If `A` is not square.
    Notes
    -----
    If `create_using` is an instance of :class:`networkx.MultiGraph` or
    :class:`networkx.MultiDiGraph`, `parallel_edges` is ``True``, and the
    entries of `A` are of type ``int``, then this function returns a multigraph
    (of the same type as `create_using`) with parallel edges. In this case,
    `edge_attribute` will be ignored.
    If `create_using` is an undirected multigraph, then only the edges
    indicated by the upper triangle of the matrix `A` will be added to the
    graph.
    Examples
    --------
    >>> import scipy.sparse
    >>> A = scipy.sparse.eye(2,2,1)
    >>> G = nx.from_scipy_sparse_matrix(A)
    If `create_using` is a multigraph and the matrix has only integer entries,
    the entries will be interpreted as weighted edges joining the vertices
    (without creating parallel edges):
    >>> import scipy
    >>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
    >>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())
    >>> G[1][1]
    {0: {'weight': 2}}
    If `create_using` is a multigraph and the matrix has only integer entries
    but `parallel_edges` is ``True``, then the entries will be interpreted as
    the number of parallel edges joining those two vertices:
    >>> import scipy
    >>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
    >>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
    ...                                 create_using=nx.MultiGraph())
    >>> G[1][1]
    {0: {'weight': 1}, 1: {'weight': 1}}
    """
    G = _prep_create_using(create_using)
    n,m = A.shape
    if n != m:
        raise nx.NetworkXError(\
            "Adjacency matrix is not square. nx,ny=%s"%(A.shape,))
    # Make sure we get even the isolated nodes of the graph.
    G.add_nodes_from(range(n))
    # Create an iterable over (u, v, w) triples and for each triple, add an
    # edge from u to v with weight w.
    triples = _generate_weighted_edges(A)
    # If the entries in the adjacency matrix are integers, the graph is a
    # multigraph, and parallel_edges is True, then create parallel edges, each
    # with weight 1, for each entry in the adjacency matrix. Otherwise, create
    # one edge for each positive entry in the adjacency matrix and set the
    # weight of that edge to be the entry in the matrix.
    if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:
        chain = itertools.chain.from_iterable
        # The following line is equivalent to:
        #
        #     for (u, v) in edges:
        #         for d in range(A[u, v]):
        #             G.add_edge(u, v, weight=1)
        #
        triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
    # If we are creating an undirected multigraph, only add the edges from the
    # upper triangle of the matrix. Otherwise, add all the edges. This relies
    # on the fact that the vertices created in the
    # ``_generated_weighted_edges()`` function are actually the row/column
    # indices for the matrix ``A``.
    #
    # Without this check, we run into a problem where each edge is added twice
    # when `G.add_weighted_edges_from()` is invoked below.
    if G.is_multigraph() and not G.is_directed():
        triples = ((u, v, d) for u, v, d in triples if u <= v)
    G.add_weighted_edges_from(triples, weight=edge_attribute)
    return G
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests when NumPy, SciPy or Pandas
    is not installed.
    Fix: the original used bare ``except:`` clauses, which also caught
    KeyboardInterrupt/SystemExit and misreported them as a skip; only
    ImportError should trigger the skip.
    """
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        raise SkipTest("NumPy not available")
    try:
        import scipy
    except ImportError:
        raise SkipTest("SciPy not available")
    try:
        import pandas
    except ImportError:
        raise SkipTest("Pandas not available")
| gpl-2.0 |
gilyclem/larVolumeToObj | larVolumeToObjG/computation/step_calcchains_serial_tobinary_filter_proc_lisa.py | 2 | 18114 | # -*- coding: utf-8 -*-
from lar import *
from scipy import *
import json
# import scipy
import numpy as np
# import time as tm
# import gc
# from pngstack2array3d import *
import struct
import getopt
import traceback
#
import matplotlib.pyplot as plt
# threading
import multiprocessing
from multiprocessing import Process, Value, Lock
from Queue import Queue
# cython stuf. not used now
import pyximport; pyximport.install()
import calc_chains_helper as cch
import logging
logger = logging.getLogger(__name__)
# ---------------mjirik
# import functions from another directory
import sys
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../../../lisa/src"))
from io3d import datareader
# ------------------------------------------------------------
# Logging & Timer
# ------------------------------------------------------------
# Verbosity threshold for log(); messages whose level exceeds this are
# suppressed.
logging_level = 2
# 0 = no_logging
# 1 = few details
# 2 = many details
# 3 = many many details
def log(n, l):
    """Print each string in list `l` prefixed with "Log:".
    Messages are emitted only when the module runs as a script and the
    verbosity level `n` does not exceed the module-wide `logging_level`.
    """
    if __name__ == "__main__" and n <= logging_level:
        for s in l:
            print "Log:", s
# ------------------------------------------------------------
# Configuration parameters
# ------------------------------------------------------------
# File-name extensions for input image stacks and binary output chunks.
PNG_EXTENSION = ".png"
BIN_EXTENSION = ".bin"
def invertIndex(nx, ny, nz):
    """Return a function mapping a linear offset to (x, y, z) coordinates
    on an (nx+1) x (ny+1) x (nz+1) vertex grid (x varies fastest).
    Fix: the index arithmetic used ``/``, which yields floats (and wrong
    coordinates) under Python 3 or ``from __future__ import division``;
    explicit floor division ``//`` is identical on Python 2 integers and
    keeps the results integral everywhere.
    """
    nx, ny, nz = nx + 1, ny + 1, nz + 1
    def invertIndex0(offset):
        a0, b0 = offset // nx, offset % nx
        a1, b1 = a0 // ny, a0 % ny
        a2, b2 = a1 // nz, a1 % nz
        return b0, b1, b2
    return invertIndex0
def countFilesInADir(directory):
    """Return the number of regular files directly inside `directory`
    (subdirectories are not counted, and the walk is not recursive).
    Fix: uses the builtin ``next()`` instead of the Python-2-only
    ``.next()`` generator method, so the helper works on Python 2 and 3.
    """
    return len(next(os.walk(directory))[2])
def isArrayEmpty(arr):
    """Return True when every element of `arr` compares equal to zero
    (vacuously True for an empty sequence)."""
    for element in arr:
        if not element == 0:
            return False
    return True
def writeOffsetToFile(file, offsetCurr):
    """Serialize the first three components of `offsetCurr` to `file` as
    big-endian unsigned 32-bit integers (12 bytes total)."""
    for axis in range(3):
        file.write(struct.pack('>I', offsetCurr[axis]))
def writeDataToFile(fileToWrite, offsetCurr, objectBoundaryChain):
    """Write one output record: a 12-byte (z, x, y) block offset followed by
    the boundary-chain payload as raw signed bytes.
    `objectBoundaryChain` is a SciPy sparse matrix; it is densified, cast to
    signed bytes and flattened before serialization.
    """
    writeOffsetToFile(
        fileToWrite,
        offsetCurr )
    databytes = np.array(objectBoundaryChain.toarray().astype('b').flatten())
    fileToWrite.write( bytearray(databytes))
def read_pkl_by_block(datap, startImage, endImage, centroidsCalc):
    """Return the slab of the segmentation volume covering slice indices
    [startImage, endImage) along the first axis.
    `centroidsCalc` is accepted for call-site compatibility but is not used
    by this helper.
    """
    volume = datap['segmentation']
    return volume[startImage:endImage, :, :]
def computeChainsThread(
        startImage, endImage, imageHeight, imageWidth,
        imageDx, imageDy, imageDz,
        Nx, Ny, Nz,
        calculateout, BORDER_FILE,
        centroidsCalc, colorIdx, datap, DIR_O
):
    """Worker task: process the slab of slices [startImage, endImage).
    Splits the slab into imageDx x imageDy tiles, extracts per-tile chains
    (via the cython helpers in `cch`) for the color selected by `colorIdx`,
    and appends one binary record per non-empty tile to a "poutput-*"
    (boundary chains, when `calculateout` is True) or "pselettori-*"
    (selector chains) file in `DIR_O`.
    Returns 0 on success, 2 on any error.  Nx/Ny/Nz are accepted but not
    used here.
    """
    # centroidsCalc - remove
    # @TODO use centroidsCalc
    # print 'cC '
    # print centroidsCalc
    # centroidsCalc = np.array([[0],[ 1]])
    log(2, [ "Working task: " + str(startImage) + "-" + str(endImage) + " [" +
        str( imageHeight) + "-" + str( imageWidth ) + "-" + str(imageDx) +
        "-" + str( imageDy) + "-" + str (imageDz) + "]" ])
    bordo3 = None
    if (calculateout == True):
        # Boundary computation needs the precomputed border operator; it is
        # stored as CSR components (ROW/COL/DATA) in a JSON file.
        with open(BORDER_FILE, "r") as file:
            bordo3_json = json.load(file)
            ROWCOUNT = bordo3_json['ROWCOUNT']
            COLCOUNT = bordo3_json['COLCOUNT']
            ROW = np.asarray(bordo3_json['ROW'], dtype=np.int32)
            COL = np.asarray(bordo3_json['COL'], dtype=np.int32)
            if np.isscalar(bordo3_json['DATA']):
                # in special case, when all numbers are same
                logger.debug('bordermatrix data stored as scalar 1')
                DATA = np.ones(COL.shape, dtype=np.int8) *\
                    np.int8(bordo3_json['DATA'])
            else:
                # this is general form
                logger.debug(
                    'bordermatrix data stored in general form')
                DATA = np.asarray(bordo3_json['DATA'], dtype=np.int8)
            # print "border m ", ROW.shape, COL.shape, DATA.shape
            # print "55555555555555555555555555555555555555"
            bordo3 = csr_matrix(
                (DATA, COL, ROW), shape=(ROWCOUNT, COLCOUNT))
    xEnd, yEnd = 0, 0
    beginImageStack = 0
    # @TODO do something with the input colorNumber
    saveTheColors = centroidsCalc
    # saveTheColors = np.array([1,0])
    # NOTE(review): np.int is a deprecated alias removed in NumPy 1.24;
    # this line needs `int` to run on modern NumPy.
    saveTheColors = np.array(
        sorted(saveTheColors.reshape(1, len(centroidsCalc))[0]), dtype=np.int
    )
    fileName = "pselettori-"
    if (calculateout == True):
        fileName = "poutput-"
    fileName = fileName + str(startImage) + "_" + str(endImage) + "-"
    returnProcess = 0
    try:
        fullfilename = DIR_O + '/' +\
            fileName + str(saveTheColors[colorIdx]) + BIN_EXTENSION
        logger.debug("file to write " + fullfilename)
        fileToWrite = open(fullfilename, "wb")
        try:
            log(2, ["Working task: " +
                    str(startImage) + "-" +
                    str(endImage) + " [loading colors]"])
            # theImage,colorNumber,theColors = pngstack2array3d(
            #     imageDir, startImage, endImage, colorNumber,
            #     pixelCalc, centroidsCalc)
            # imageDirPkl = "data.pklz"
            # -------------------------------
            theImage = read_pkl_by_block(
                datap,
                startImage, endImage,
                centroidsCalc)
            # colorIdx = 2
            # print "orig shape ", datap['segmentation'].shape
            # print "png stack"
            # print 'startim ', startImage
            # print 'endim', endImage
            # print 'unique', np.unique(theImage)
            # print 'centrCol ', centroidsCalc
            # print 'saveTheColors ', saveTheColors, colorIdx
            # print 'calculateout ', calculateout
            # import ipdb; ipdb.set_trace() # noqa BREAKPOINT
            # theColors = theColors.reshape(1,colorNumber)
            # if (sorted(theColors[0]) != saveTheColors):
            #     log(1, [ "Error: colorNumber have changed"] )
            #     sys.exit(2)
            log(2, ["Working task: " +
                    str(startImage) + "-" +
                    str(endImage) + " [comp loop]" ])
            # Tile the slab; `/` is Python-2 integer division here.
            for xBlock in xrange(imageHeight / imageDx):
                for yBlock in xrange(imageWidth/imageDy):
                    xStart, yStart = xBlock * imageDx, yBlock * imageDy
                    xEnd, yEnd = xStart+imageDx, yStart+imageDy
                    image = theImage[:, xStart:xEnd, yStart:yEnd]
                    # print "image ", image
                    nz, nx, ny = image.shape
                    # Compute a quotient complex of chains with constant field
                    # ------------------------------------------------------------
                    chains3D_old = []
                    chains3D = None
                    hasSomeOne = False
                    if (calculateout != True):
                        chains3D = np.zeros(nx * ny * nz, dtype=np.int32)
                    zStart = startImage - beginImageStack
                    if (calculateout == True):
                        chains3D_old = cch.setList(
                            nx, ny, nz, colorIdx, image, saveTheColors)
                    else:
                        hasSomeOne, chains3D = cch.setListNP(
                            nx, ny, nz, colorIdx, image, saveTheColors)
                    # Compute the boundary complex of the quotient cell
                    objectBoundaryChain = None
                    if (calculateout == True) and (len(chains3D_old) > 0):
                        objectBoundaryChain = larBoundaryChain(
                            bordo3, chains3D_old)
                    # print objectBoundaryChain
                    # brd = bordo3.todense()
                    # print "chains3D_old"
                    # print chains3D_old
                    # print len(chains3D_old)
                    # print "objectBoundaryChain s",
                    # if objectBoundaryChain is not None:
                    #     # print objectBoundaryChain
                    #     print "e ", objectBoundaryChain.todense().shape
                    # print objectBoundaryChain.toarray().astype('b').flatten()
                    # Save: empty tiles produce no record at all.
                    if (calculateout == True):
                        if (objectBoundaryChain != None):
                            writeDataToFile(
                                fileToWrite,
                                np.array(
                                    [zStart, xStart, yStart], dtype=int32),
                                objectBoundaryChain)
                    else:
                        if (hasSomeOne != False):
                            writeOffsetToFile(
                                fileToWrite,
                                np.array([zStart, xStart, yStart], dtype=int32)
                            )
                            fileToWrite.write(
                                bytearray(np.array(
                                    chains3D, dtype=np.dtype('b'))))
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt;
            # `except Exception:` would be safer.
            import traceback
            logger.debug(traceback.format_exc())
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(
                exc_type, exc_value, exc_traceback)
            # Log it or whatever here
            log(1, ["Error: " + ''.join('!! ' + line for line in lines)])
            returnProcess = 2
        finally:
            fileToWrite.close()
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log(1, ["Error: " + ''.join('!! ' + line for line in lines)])
        returnProcess = 2
    return returnProcess
# Accumulator for per-task return codes; apply_async's callback runs in the
# parent process, so appending here is safe.
processRes = []
def collectResult(resData):
    """Pool callback: record one worker task's return code (0 or 2)."""
    processRes.append(resData)
def startComputeChains(
        imageHeight, imageWidth, imageDepth,
        imageDx, imageDy, imageDz,
        Nx, Ny, Nz, calculateout, BORDER_FILE,
        centroidsCalc, colorIdx, datap, DIR_O
):
    """Fan the volume out over a process pool, one computeChainsThread task
    per imageDz-thick slab, and collect the results via collectResult().
    Returns 0 when every task reported success (sum of collected codes is
    zero), 2 otherwise.
    """
    beginImageStack = 0
    endImage = beginImageStack
    saveTheColors = centroidsCalc
    log(2, [centroidsCalc])
    # NOTE(review): np.int is a deprecated alias removed in NumPy 1.24.
    saveTheColors = np.array(
        sorted(saveTheColors.reshape(1, len(centroidsCalc))[0]), dtype=np.int)
    log(2, [saveTheColors])
    # print str(imageHeight) + '-' + str(imageWidth) + '-' + str(imageDepth)
    # print str(imageDx) + '-' + str(imageDy) + '-' + str(imageDz)
    # print str(Nx) + '-' + str(Ny) + '-' + str(Nz)
    returnValue = 2
    # Use half the CPUs (Python-2 integer division), but at least one worker.
    processPool = max(1, multiprocessing.cpu_count() / 2)
    log(2, ["Starting pool with: " + str(processPool)])
    try:
        pool = multiprocessing.Pool(processPool)
        log(2, ['Start pool'])
        for j in xrange(imageDepth / imageDz):
            # Each task covers slices [startImage, endImage).
            startImage = endImage
            endImage = startImage + imageDz
            log(2, [ "Added task: " + str(j) + " -- (" + str(startImage) + "," + str(endImage) + ")" ])
            pool.apply_async(
                computeChainsThread,
                args = (startImage, endImage, imageHeight, imageWidth,
                        imageDx, imageDy, imageDz, Nx, Ny, Nz, calculateout,
                        BORDER_FILE, centroidsCalc,
                        colorIdx, datap, DIR_O, ),
                callback = collectResult)
        log(2, [ "Waiting for completion..." ])
        pool.close()
        pool.join()
        log(1, [ "Completed: " + str(processRes) ])
        if (sum(processRes) == 0):
            returnValue = 0
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log(1, [ "Error: " + ''.join('!! ' + line for line in lines) ]) # Log it or whatever here
    return returnValue
def segmentation_from_data3d(datap, label):
    """Derive a segmentation by thresholding data3d at `label`: voxels
    strictly above the threshold get value 2, everything else 0.  The
    segmentation is stored back into `datap`, which is also returned."""
    above_threshold = datap['data3d'] > label
    datap['segmentation'] = above_threshold.astype(np.uint8) * 2
    return datap
def runComputation(imageDx, imageDy, imageDz, coloridx, calculateout,
                   V, FV, input_pkl_file, BORDER_FILE, DIR_O, label):
    """Load the input volume, prepare its segmentation and launch the
    multi-process chain computation over all slabs.
    Returns 0 on success and 2 on failure (same convention as the workers).
    Fix: the failure path called ``tracert.print_exc()`` -- an undefined
    name -- so every error raised a NameError that masked the original
    traceback; it now calls ``traceback.print_exc()``.
    """
    dr = datareader.DataReader()
    datap = dr.Get3DData(input_pkl_file, qt_app=None, dataplus_format=True,
                         gui=False)
    if 'segmentation' not in datap.keys():
        # No precomputed segmentation stored: derive one by thresholding the
        # raw volume at `label`.
        datap = segmentation_from_data3d(datap, label)
    segmentation = datap['segmentation'].astype(np.uint8)
    # Hack because of colors: force voxel values 0 and 1 to occur so that
    # np.unique() below always reports a stable color set.
    segmentation[0, 0, 0] = 0
    segmentation[0, 0, 1] = 1
    datap['segmentation'] = segmentation
    logger.debug("unique %s " %(str(np.unique(datap['segmentation']))))
    imageHeight, imageWidth = datap['segmentation'][:,:,:].shape[1:3]
    imageDepth = datap['segmentation'].shape[0]
    # NOTE(review): Ny divides by imageDx (not imageDy) -- looks like a typo,
    # but Nx/Ny/Nz are unused downstream, so behavior is kept unchanged.
    Nx, Ny, Nz = imageHeight/imageDx, imageWidth/imageDx, imageDepth/imageDz
    returnValue = 2
    try:
        # One "color" per distinct segmentation value.
        centroidsCalc = np.unique(datap['segmentation'])
        returnValue = startComputeChains(
            imageHeight, imageWidth, imageDepth,
            imageDx, imageDy, imageDz, Nx, Ny, Nz,
            calculateout, BORDER_FILE,
            centroidsCalc, coloridx,
            datap, DIR_O)
    except Exception:
        # `except Exception` (not bare) so KeyboardInterrupt still aborts.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log(1, [ "Error: " + ''.join('!! ' + line for line in lines) ]) # Log it or whatever here
        returnValue = 2
        traceback.print_exc()  # was: tracert.print_exc() (NameError)
    return returnValue
def main(argv):
    """Command-line entry point: parse options, then run calcchains_main.
    Exits the process with 0 on success and 2 on bad arguments or failure.
    Fixes: (1) calcchains_main was called with the keyword ``threshold=``,
    but its final parameter is named ``label`` (same meaning: gray-value
    threshold), so every invocation raised TypeError; (2) the Python-2
    ``print`` statements are written as single-argument ``print(...)``
    calls, which behave identically on Python 2 and also parse on Python 3;
    (3) typo "where given" -> "were given" in the usage error message.
    """
    ARGS_STRING = 'Args: -r -b <borderfile> -x <borderX> -y <borderY> -z <borderZ> -i <inputdirectory> -c <colors> -d <coloridx> -o <outputdir> -q <bestimage>'
    try:
        opts, args = getopt.getopt(argv, "rb:x:y:z:i:c:d:o:q:")
    except getopt.GetoptError:
        print(ARGS_STRING)
        sys.exit(2)
    # Defaults: cubic 64-voxel blocks; -x sets all three, -y/-z narrow it.
    nx = ny = nz = imageDx = imageDy = imageDz = 64
    colors = 2
    coloridx = 0
    # Number of mandatory options (-x, -i, -b, -o, -d) still missing.
    mandatory = 5
    calculateout = False
    # Files
    BORDER_FILE = 'bordo3.json'
    BEST_IMAGE = ''
    DIR_IN = ''
    DIR_O = ''
    for opt, arg in opts:
        if opt == '-x':
            nx = ny = nz = imageDx = imageDy = imageDz = int(arg)
            mandatory = mandatory - 1
        elif opt == '-y':
            ny = nz = imageDy = imageDz = int(arg)
        elif opt == '-z':
            nz = imageDz = int(arg)
        elif opt == '-r':
            calculateout = True
        elif opt == '-i':
            DIR_IN = arg + '/'
            mandatory = mandatory - 1
        elif opt == '-b':
            BORDER_FILE = arg
            mandatory = mandatory - 1
        elif opt == '-o':
            mandatory = mandatory - 1
            DIR_O = arg
        elif opt == '-c':
            # mandatory = mandatory - 1
            colors = int(arg)
        elif opt == '-d':
            mandatory = mandatory - 1
            coloridx = int(arg)
        elif opt == '-q':
            BEST_IMAGE = int(arg)
            # BEST_IMAGE = 10
    if mandatory != 0:
        print('Not all arguments were given')
        print(ARGS_STRING)
        sys.exit(2)
    returnValue = calcchains_main(
        nx, ny, nz,
        calculateout,
        DIR_IN,
        BORDER_FILE,
        DIR_O,
        # colors,
        coloridx,
        # Gray-value threshold; the parameter was renamed from `threshold`
        # to `label` in calcchains_main.
        label=5000
    )
    sys.exit(returnValue)
def calcchains_main(
        nx, ny, nz,
        calculateout,
        input_filename,
        BORDER_FILE,
        DIR_O,
        # colors,
        coloridx,
        label
):
    """Build the vertex grid V and quad face list FV for an nx x ny x nz
    block, then delegate the actual computation to runComputation().
    `label` is the gray-value threshold applied when the input file carries
    no precomputed segmentation.  Returns runComputation()'s status code
    (0 on success, 2 on failure).
    """
    # if (coloridx >= colors):
    #     print 'Not all arguments where given (coloridx >= colors)'
    #     print ARGS_STRING
    #     sys.exit(2)
    def ind(x, y, z):
        # Linear index of vertex (x, y, z) on the (nx+1)x(ny+1)x(nz+1) grid.
        return x + (nx+1) * (y + (ny+1) * (z))
    chunksize = nx * ny + nx * nz + ny * nz + 3 * nx * ny * nz
    # All grid vertices, x varying fastest (matches invertIndex ordering).
    V = [[x, y, z]
         for z in xrange(nz + 1)
         for y in xrange(ny + 1)
         for x in xrange(nx + 1)]
    v2coords = invertIndex(nx, ny, nz)
    # mj
    # construction of vertex grid: one axis-aligned quad per interior vertex
    # and direction (xy, xz, yz planes).
    FV = []
    for h in xrange(len(V)):
        x, y, z = v2coords(h)
        if (x < nx) and (y < ny):
            FV.append([h, ind(x+1, y, z), ind(x, y+1, z), ind(x+1, y+1, z)])
        if (x < nx) and (z < nz):
            FV.append([h, ind(x+1, y, z), ind(x, y, z+1), ind(x+1, y, z+1)])
        if (y < ny) and (z < nz):
            FV.append([h, ind(x, y+1, z), ind(x, y, z+1), ind(x, y+1, z+1)])
    # print 'coloridx ', coloridx
    # print 'calc', calculateout
    # print 'V.len: ', len(V), ' V[0:4]: ', V[0:4]
    # print 'FV.len: ', len(FV), 'FV[0:4]: ', FV[0:4]
    # print 'dirl', input_filename, BORDER_FILE
    # print 'diro ', DIR_O
    return runComputation(nx, ny, nz, coloridx, calculateout, V, FV, input_filename,
                          BORDER_FILE, DIR_O, label)
# Script entry point: parse CLI arguments and run the computation.
if __name__ == "__main__":
    main(sys.argv[1:])
| mit |
zorroblue/scikit-learn | examples/calibration/plot_calibration.py | 41 | 4826 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see https://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is
able to provide a probability calibration that returns probabilities close
to the expected 0.5 for most of the samples belonging to the middle
cluster with heterogeneous labels. This results in a significantly improved
Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <mathieu@mblondel.org>
#         Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#         Balazs Kegl <balazs.kegl@gmail.com>
#         Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
n_samples = 50000
n_bins = 3  # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
                  centers=centers, shuffle=False, random_state=42)
# Labels split the ordered samples in half, so the middle blob straddles
# both classes and its true class probability is 0.5.
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
    train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train)  # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
# Brier score: mean squared difference between predicted probability and
# outcome -- lower is better.
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
# #############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
    this_X = X_train[y_train == this_y]
    this_sw = sw_train[y_train == this_y]
    plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color,
                alpha=0.5, edgecolor='k',
                label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
# Second figure: predicted probabilities sorted by the uncalibrated score,
# with the empirical class-1 fraction per bucket as reference.
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
         label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
         label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
         y_test[order].reshape(25, -1).mean(1),
         'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
           "(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
abhishekraok/LayeredNeuralNetwork | layeredneuralnetwork/layered_neural_network.py | 1 | 5785 | import numpy as np
from layeredneuralnetwork import node_manager
from layeredneuralnetwork import node
from sklearn import svm, metrics
from layeredneuralnetwork import transform_function
from layeredneuralnetwork.classifier_interface import ClassifierInterface
from layeredneuralnetwork import utilities
# Minimum F1 score an already-learned label must reach on new data for
# retraining to be skipped (see LayeredNeuralNetwork.fit).
retrain_threshold_f1_score = 0.9
class LayeredNeuralNetwork(ClassifierInterface):
    """Multi-label classifier that grows one node per learned label.
    Each new label is trained on the raw input concatenated with the
    activations of every previously learned label node, so later
    classifiers can reuse earlier ones as features.
    Fix: ``dtype=np.int`` in predict() raised AttributeError on NumPy >= 1.24
    (the alias was removed); the builtin ``int`` is the documented
    replacement and is what the alias always meant.
    """
    def __init__(self, input_dimension):
        ClassifierInterface.__init__(self, input_dimension)
        self.input_dimension = input_dimension
        self.node_manager = node_manager.NodeManager(input_dimension)
        # Maps a label to the name of the node that implements it.
        self.label_to_node_name = {}
        # Labels in the order they were learned.
        self.labels = []
    def fit(self, X, Y, label):
        """
        Trains the model using data X for class named "label".
        Y is binary indicating presence/absence of class.
        :param X: numpy matrix of size (samples, features)
        :param Y: numpy array of integers with values 0,1
        :type label: str
        :rtype: bool
        :return: whether retrained
        """
        print('Training for label ' + label)
        if label in self.labels:
            # Skip retraining only when the existing node still scores well.
            score = self.score(X, Y, label)
            if score > retrain_threshold_f1_score:
                print('Label {0} already exists with score {1}. Not retraining'.format(label, score))
                return False
            else:
                print('Label {0} exists with score {1}. Retraining'.format(label, score))
        self.fit_new_node(X, Y, label)
        return True
    def fit_new_node(self, X, Y, label):
        """Train a fresh linear-SVC node for `label` on the raw input plus
        all existing label activations, and register it with the node
        manager."""
        sample_count = X.shape[0]
        input_and_features = np.zeros(shape=[sample_count, self.input_dimension + len(self.labels)])
        input_and_features[:, :self.input_dimension] = X
        input_and_features[:, self.input_dimension:] = self.activate_all(X)
        # L1 penalty keeps learned weights sparse; scikit-learn requires
        # dual=False for this penalty/loss combination.
        linear_svc = svm.LinearSVC(dual=False, penalty='l1')
        linear_svc.fit(input_and_features, Y)
        score = linear_svc.score(input_and_features, Y)
        print('Trained new Linear SVC with score ' + str(score))
        learned_transform_function = transform_function.SVCTransformFunction(
            input_dimension=input_and_features.shape[1],
            svm=linear_svc)
        node_name = self.get_new_node_name(label)
        input_names = self.node_manager.get_input_names() + self.latest_node_names()
        new_node = node.Node(name=node_name,
                             input_names=input_names,
                             transform_function=learned_transform_function,
                             node_manager=self.node_manager,
                             is_input=False)
        self.node_manager.add_node(new_node)
        if label not in self.label_to_node_name:
            self.labels.append(label)
        self.label_to_node_name[label] = node_name
    def latest_node_names(self):
        """Node names for all learned labels, in learning order."""
        return [self.label_to_node_name[i] for i in self.labels]
    def predict(self, X, label):
        """
        Predicts whether given X belongs to class "label".
        :param X: numpy matrix of size (samples, features)
        :type label: str
        :return: a numpy array of size (samples) containing 1,0
        :rtype: np.array
        :raises ValueError: if `label` has never been trained.
        """
        if label not in self.label_to_node_name:
            raise ValueError('No label named ' + label + ' in this LNN')
        node_name = self.label_to_node_name[label]
        features = self.node_manager.get_output(X, node_name)
        # `int` replaces the removed `np.int` alias (NumPy >= 1.24).
        return np.array(features > 0.5, dtype=int)
    def activate_all(self, X):
        """
        Activates all the labels.
        :rtype: np.array
        :return: numpy array with dimension (samples, learned label count)
        """
        sample_count = X.shape[0]
        result = np.zeros(shape=[sample_count, self.feature_count()])
        for i, label in enumerate(self.labels):
            result[:, i] = self.node_manager.get_output(X, self.label_to_node_name[label])
        return result
    def identify(self, X):
        """
        Best guess for which class X belongs to.
        :param X: numpy matrix of size (samples, features)
        :return: guessed class name
        :rtype: str
        """
        features = self.activate_all(X)
        # Pick the label whose activations carry the largest mean energy.
        max_index = np.argmax((features ** 2).mean(axis=0))
        return self.labels[max_index]
    def feature_count(self):
        """Number of labels learned so far (one feature per label)."""
        return len(self.labels)
    def score(self, X, Y, label):
        """
        Gets the F1 score for given input for given label.
        :rtype: float
        """
        predicted_y = self.predict(X, label)
        return metrics.f1_score(Y, predicted_y)
    def get_weights(self):
        """Return the per-label weight vectors stacked into one matrix,
        zero-padded on the right to width input_dimension + label count."""
        weights = np.zeros(shape=[len(self.labels), self.input_dimension + len(self.labels)])
        for i, label in enumerate(self.labels):
            label_weight = self.node_manager.get_weight(self.label_to_node_name[label])
            weights[i, :label_weight.shape[0]] = label_weight
        return weights
    def get_new_node_name(self, label):
        """Return an unused node name for `label`: "<label><sep>0" for a new
        label, otherwise the old name with its numeric version suffix bumped;
        falls back to a random suffix when no versioned name is free."""
        if label not in self.label_to_node_name:
            return label + utilities.node_version_separator + '0'
        old_node_name = self.label_to_node_name[label]
        if utilities.node_version_separator in old_node_name:
            components = old_node_name.split(utilities.node_version_separator)
            version = components[-1]
            if version.isdigit():
                next_version = int(version) + 1
                new_node_name = utilities.node_version_separator.join(
                    components[:-1]) + utilities.node_version_separator + str(
                    next_version)
                if not self.node_manager.has_node_name(new_node_name):
                    return new_node_name
        return old_node_name + utilities.generate_random_string(20)
| mit |
CJ-Jewell/ThinkStats2 | code/hypothesis.py | 75 | 10162 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
    """Tests the hypothesis that a coin is fair."""
    def TestStatistic(self, data):
        """Absolute difference between head and tail counts.
        data: (heads, tails) pair
        """
        heads, tails = data
        return abs(heads - tails)
    def RunModel(self):
        """Simulate the same number of flips of a fair coin.
        returns: simulated (heads, tails) pair
        """
        heads, tails = self.data
        flips = [random.choice('HT') for _ in range(heads + tails)]
        hist = thinkstats2.Hist(flips)
        return hist['H'], hist['T']
class DiffMeansPermute(thinkstats2.HypothesisTest):
    """Tests a difference in means by permutation."""
    def TestStatistic(self, data):
        """Absolute difference between the two group means.
        data: pair of group sequences
        """
        group1, group2 = data
        return abs(group1.mean() - group2.mean())
    def MakeModel(self):
        """Pool the two groups under the null hypothesis."""
        group1, group2 = self.data
        self.n, self.m = len(group1), len(group2)
        self.pool = np.hstack((group1, group2))
    def RunModel(self):
        """Shuffle the pooled values and split them back into two groups.
        returns: pair of simulated groups
        """
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]
class DiffMeansOneSided(DiffMeansPermute):
    """Tests a one-sided difference in means by permutation."""
    def TestStatistic(self, data):
        """Signed difference of group means (group1 minus group2).
        data: pair of group sequences
        """
        group1, group2 = data
        return group1.mean() - group2.mean()
class DiffStdPermute(DiffMeansPermute):
    """Tests a one-sided difference in standard deviation by permutation."""
    def TestStatistic(self, data):
        """Signed difference of group standard deviations.
        data: pair of group sequences
        """
        group1, group2 = data
        return group1.std() - group2.std()
class CorrelationPermute(thinkstats2.HypothesisTest):
    """Tests correlations by permutation."""
    def TestStatistic(self, data):
        """Absolute correlation between the two variables.
        data: tuple of xs and ys
        """
        xs, ys = data
        return abs(thinkstats2.Corr(xs, ys))
    def RunModel(self):
        """Permute xs while leaving ys fixed.
        returns: simulated (xs, ys)
        """
        xs, ys = self.data
        return np.random.permutation(xs), ys
class DiceTest(thinkstats2.HypothesisTest):
    """Tests whether a six-sided die is fair."""
    def TestStatistic(self, data):
        """Total absolute deviation of observed from expected frequencies.
        data: list of frequencies
        """
        observed = data
        n = sum(observed)
        expected = np.ones(6) * n / 6
        return sum(abs(observed - expected))
    def RunModel(self):
        """Roll a fair die the same number of times.
        returns: list of simulated frequencies for faces 1..6
        """
        n = sum(self.data)
        faces = [1, 2, 3, 4, 5, 6]
        rolls = np.random.choice(faces, n, replace=True)
        return thinkstats2.Hist(rolls).Freqs(faces)
class DiceChiTest(DiceTest):
    """Tests a six-sided die using a chi-squared statistic."""
    def TestStatistic(self, data):
        """Chi-squared statistic of observed vs. expected frequencies.
        data: list of frequencies
        """
        observed = data
        n = sum(observed)
        expected = np.ones(6) * n / 6
        return sum((observed - expected) ** 2 / expected)
class PregLengthTest(thinkstats2.HypothesisTest):
    """Tests difference in pregnancy length using a chi-squared statistic."""
    def TestStatistic(self, data):
        """Sum of the chi-squared statistics of the two groups.
        data: pair of lists of pregnancy lengths
        """
        firsts, others = data
        return self.ChiSquared(firsts) + self.ChiSquared(others)
    def ChiSquared(self, lengths):
        """Chi-squared statistic of `lengths` against the pooled expectation.
        lengths: sequence of lengths
        returns: float
        """
        hist = thinkstats2.Hist(lengths)
        observed = np.array(hist.Freqs(self.values))
        expected = self.expected_probs * len(lengths)
        return sum((observed - expected) ** 2 / expected)
    def MakeModel(self):
        """Pool both groups and record the expected per-week distribution
        for weeks 35 through 43."""
        firsts, others = self.data
        self.n = len(firsts)
        self.pool = np.hstack((firsts, others))
        pmf = thinkstats2.Pmf(self.pool)
        self.values = range(35, 44)
        self.expected_probs = np.array(pmf.Probs(self.values))
    def RunModel(self):
        """Shuffle the pool and split it into two simulated groups.
        returns: simulated data
        """
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]
def RunDiceTest():
    """Runs both fairness tests on one observed set of die frequencies."""
    observed = [8, 9, 19, 5, 8, 11]
    # total-deviation test first, then the chi-squared variant
    for label, test_class in (('dice test', DiceTest),
                              ('dice chi test', DiceChiTest)):
        ht = test_class(observed)
        print(label, ht.PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
    """Estimates the chance of a false negative based on resampling.

    data: pair of sequences
    num_runs: how many experiments to simulate

    returns: float, fraction of runs whose p-value exceeds 0.05
    """
    group1, group2 = data
    misses = 0
    for _ in range(num_runs):
        resampled = (thinkstats2.Resample(group1),
                     thinkstats2.Resample(group2))
        ht = DiffMeansPermute(resampled)
        # p > 0.05 means the (real) effect was not detected
        if ht.PValue(iters=101) > 0.05:
            misses += 1
    return misses / num_runs
def PrintTest(p_value, ht):
    """Prints a summary of a hypothesis test result.

    p_value: float
    ht: HypothesisTest with an `actual` attribute and a MaxTestStat() method
    """
    summary = (('p-value =', p_value),
               ('actual =', ht.actual),
               ('ts max =', ht.MaxTestStat()))
    for label, value in summary:
        print(label, value)
def RunTests(data, iters=1000):
    """Runs several permutation tests on the given data.

    Runs the two-sided difference-in-means test (and saves its CDF plot
    to 'hypothesis1'), the one-sided difference-in-means test, and the
    one-sided difference-in-std test, printing each result.

    data: pair of sequences
    iters: number of iterations to run
    """
    # test the difference in means
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=iters)
    print('\nmeans permute two-sided')
    PrintTest(p_value, ht)
    # save the CDF of the permuted test statistics as a figure
    ht.PlotCdf()
    thinkplot.Save(root='hypothesis1',
                   title='Permutation test',
                   xlabel='difference in means (weeks)',
                   ylabel='CDF',
                   legend=False)
    # test the difference in means one-sided
    ht = DiffMeansOneSided(data)
    p_value = ht.PValue(iters=iters)
    print('\nmeans permute one-sided')
    PrintTest(p_value, ht)
    # test the difference in std
    ht = DiffStdPermute(data)
    p_value = ht.PValue(iters=iters)
    print('\nstd permute one-sided')
    PrintTest(p_value, ht)
def ReplicateTests():
    """Replicates tests with the new NSFG data.

    Re-runs the pregnancy-length, birth-weight, age/weight-correlation
    and chi-squared tests on the frames from nsfg2, printing each result.
    """
    live, firsts, others = nsfg2.MakeFrames()
    # compare pregnancy lengths
    print('\nprglngth2')
    data = firsts.prglngth.values, others.prglngth.values
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=1000)
    print('means permute two-sided')
    PrintTest(p_value, ht)
    print('\nbirth weight 2')
    data = (firsts.totalwgt_lb.dropna().values,
            others.totalwgt_lb.dropna().values)
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=1000)
    print('means permute two-sided')
    PrintTest(p_value, ht)
    # test correlation; drop rows missing either variable
    live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
    data = live2.agepreg.values, live2.totalwgt_lb.values
    ht = CorrelationPermute(data)
    p_value = ht.PValue()
    print('\nage weight correlation 2')
    PrintTest(p_value, ht)
    # compare pregnancy lengths (chi-squared)
    data = firsts.prglngth.values, others.prglngth.values
    ht = PregLengthTest(data)
    p_value = ht.PValue()
    print('\npregnancy length chi-squared 2')
    PrintTest(p_value, ht)
def main():
    """Runs all the hypothesis-testing examples.

    Seeds the RNG (17) so the permutation results are reproducible.
    """
    thinkstats2.RandomSeed(17)
    # run the coin test
    ct = CoinTest((140, 110))
    pvalue = ct.PValue()
    print('coin test p-value', pvalue)
    # compare pregnancy lengths
    print('\nprglngth')
    live, firsts, others = first.MakeFrames()
    data = firsts.prglngth.values, others.prglngth.values
    RunTests(data)
    # compare birth weights (dropping missing values)
    print('\nbirth weight')
    data = (firsts.totalwgt_lb.dropna().values,
            others.totalwgt_lb.dropna().values)
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=1000)
    print('means permute two-sided')
    PrintTest(p_value, ht)
    # test correlation between mother's age and birth weight
    live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
    data = live2.agepreg.values, live2.totalwgt_lb.values
    ht = CorrelationPermute(data)
    p_value = ht.PValue()
    print('\nage weight correlation')
    print('n=', len(live2))
    PrintTest(p_value, ht)
    # run the dice test
    RunDiceTest()
    # compare pregnancy lengths (chi-squared)
    data = firsts.prglngth.values, others.prglngth.values
    ht = PregLengthTest(data)
    p_value = ht.PValue()
    print('\npregnancy length chi-squared')
    PrintTest(p_value, ht)
    # compute the false negative rate for difference in pregnancy length
    data = firsts.prglngth.values, others.prglngth.values
    neg_rate = FalseNegRate(data)
    print('false neg rate', neg_rate)
    # run the tests with new nsfg data
    ReplicateTests()
# Run the full analysis when executed as a script.
if __name__ == "__main__":
    main()
| gpl-3.0 |
njcuk9999/neil_superwasp_periodogram | fastDFT.py | 1 | 19103 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 08/03/17 at 12:41 PM
@author: neil
Program description here
Version 0.0.0
"""
import numpy as np
from astropy.io import fits
from numexpr import evaluate as ne
# =============================================================================
# Define variables
# =============================================================================
WORKSPACE = '/Astro/Projects/RayPaul_Work/SuperWASP/'
# Test data 1
TESTPATH = WORKSPACE + '/Programs/CLEAN_periodogram_IDL/test.fits'
# -----------------------------------------------------------------------------
MAX_SIZE = 10000
# =============================================================================
# Define functions
# =============================================================================
def dft_o(freq, tvec, dvec, kind='half'):
    """
    Calculate the Discrete Fourier transform (slow, scales with N^2)

    The DFT is normalised to have the mean value of the data at zero frequency

    :param freq: numpy array, frequency grid calculated from the time vector
    :param tvec: numpy array or list, input time(independent) vector, normalised
                 by the mean of the time vector
    :param dvec: numpy array or list, input dependent vector, normalised by the
                 mean of the data vector
    :param kind: string, if 'half' the DFT is computed only for the first half
                 of the frequencies (the spectral window function is still
                 computed for all of them); if 'full' both are computed for
                 every frequency
                 (the docstring previously advertised a `log` parameter that
                 the function never accepted)
    :return wfn: numpy array of complex numbers, spectral window function
    :return dft: numpy array of complex numbers, "dirty" discrete Fourier
                 transform
    """
    # -------------------------------------------------------------------------
    # Code starts here
    # -------------------------------------------------------------------------
    ntvec = len(tvec)
    wfn = np.zeros(len(freq), dtype=complex)
    if kind == 'half':
        dft = np.zeros(int(len(freq)/2), dtype=complex)
    else:
        dft = np.zeros(len(freq), dtype=complex)
    for i in range(len(freq)):
        phase = -2*np.pi*freq[i]*tvec
        phvec = np.array(np.cos(phase) + 1j * np.sin(phase))
        # the window function is always computed for every frequency
        wfn[i] = np.sum(phvec)/ntvec
        # the DFT only for as many frequencies as `dft` holds (this unifies
        # the previously duplicated 'half'/'full' branches)
        if i < len(dft):
            dft[i] = np.sum(dvec*phvec)/ntvec
    return wfn, dft
def dft_ne(freq, tvec, dvec, log=False, kind='half'):
    """
    Calculate the Discrete Fourier transform (slow scales with N^2),
    evaluating the inner expressions with numexpr.

    The DFT is normalised to have the mean value of the data at zero frequency

    :param freq: numpy array, frequency grid calculated from the time vector
    :param tvec: numpy array or list, input time(independent) vector, normalised
                 by the mean of the time vector
    :param dvec: numpy array or list, input dependent vector, normalised by the
                 mean of the data vector
    :param log: boolean, if True prints progress to standard output
                if False silent
    :param kind: string, if 'half' uses only the largest half of the frequencies
                 if 'full' will return all frequencies
    :return wfn: numpy array of complex numbers, spectral window function
    :return dft: numpy array of complex numbers, "dirty" discrete Fourier
                 transform
    """
    # -------------------------------------------------------------------------
    # Code starts here
    # -------------------------------------------------------------------------
    wfn = np.zeros(len(freq), dtype=complex)
    if kind == 'half':
        dft = np.zeros(int(len(freq)/2), dtype=complex)
    else:
        dft = np.zeros(len(freq), dtype=complex)
    # work out all constants before the loop (numexpr reads local names)
    Ntvec, two_pi_t = len(tvec), -2*np.pi*tvec
    Nfreq = int(len(freq)/2)
    # loop around freq
    for i in __tqdmlog__(range(len(freq)), log):
        freqi = freq[i]
        # phvec = cos(phase) + 1j*sin(phase), with phase = -2*pi*freq[i]*tvec
        phvec = ne('cos(freqi*two_pi_t) + 1j*sin(freqi*two_pi_t)')
        # the spectral window function uses every frequency
        wfn[i] = ne('sum(phvec/Ntvec)')
        if kind == 'half':
            if i < Nfreq:
                dft[i] = ne('sum(dvec*phvec/Ntvec)')
        else:
            # BUG FIX: this branch previously evaluated 'dev*phvec' where
            # `dev` is undefined, so every kind='full' call raised
            dft[i] = ne('sum(dvec*phvec/Ntvec)')
    return wfn, dft
def dft_ne2(freq, tvec, dvec, kind='half'):
    """
    Calculate the Discrete Fourier transform (slow scales with N^2),
    evaluating the inner expressions with numexpr (phase precomputed with
    numpy, unlike dft_ne).

    The DFT is normalised to have the mean value of the data at zero frequency

    :param freq: numpy array, frequency grid calculated from the time vector
    :param tvec: numpy array or list, input time(independent) vector, normalised
                 by the mean of the time vector
    :param dvec: numpy array or list, input dependent vector, normalised by the
                 mean of the data vector
    :param kind: string, if 'half' uses only the largest half of the frequencies
                 if 'full' will return all frequencies
    :return wfn: numpy array of complex numbers, spectral window function
    :return dft: numpy array of complex numbers, "dirty" discrete Fourier
                 transform
    """
    # -------------------------------------------------------------------------
    # Code starts here
    # -------------------------------------------------------------------------
    wfn = np.zeros(len(freq), dtype=complex)
    if kind == 'half':
        dft = np.zeros(int(len(freq)/2), dtype=complex)
    else:
        dft = np.zeros(len(freq), dtype=complex)
    # work out all constants before the loop (numexpr reads local names)
    Ntvec, two_pi_t = len(tvec), -2*np.pi*tvec
    Nfreq = int(len(freq)/2)
    # loop around freq
    for i in range(len(freq)):
        freqi = freq[i]
        phase = freqi*two_pi_t
        # phvec = cos(phase) + 1j*sin(phase)
        phvec = ne('cos(phase) + 1j*sin(phase)')
        # the spectral window function uses every frequency
        wfn[i] = ne('sum(phvec/Ntvec)')
        if kind == 'half':
            if i < Nfreq:
                dft[i] = ne('sum(dvec*phvec/Ntvec)')
        else:
            # BUG FIX: this branch previously evaluated 'dev*phvec' where
            # `dev` is undefined, so every kind='full' call raised
            dft[i] = ne('sum(dvec*phvec/Ntvec)')
    return wfn, dft
def dft_nfor(freq, tvec, dvec, kind='half'):
    """
    Vectorised Discrete Fourier transform: no explicit loop over
    frequencies, at the cost of len(freq) x len(tvec) memory.

    The DFT is normalised to have the mean value of the data at zero frequency

    :param freq: numpy array, frequency grid calculated from the time vector
    :param tvec: numpy array or list, input time(independent) vector, normalised
                 by the mean of the time vector
    :param dvec: numpy array or list, input dependent vector, normalised by the
                 mean of the data vector
    :param kind: string, if 'half' uses only the largest half of the frequencies
                 if 'full' will return all frequencies
    :return wfn: numpy array of complex numbers, spectral window function
    :return dft: numpy array of complex numbers, "dirty" discrete Fourier
                 transform
    """
    # -------------------------------------------------------------------------
    # Code starts here
    # -------------------------------------------------------------------------
    nfreq = len(freq)
    ntime = len(tvec)
    # column of frequencies times row of times -> phase matrix (N x M)
    fcol = np.matrix(freq).T
    trow = np.matrix(tvec)
    phase = -2 * np.pi * fcol * trow
    # complex phase factors cos + i*sin, shape (N x M)
    phasors = np.cos(phase) + 1j * np.sin(phase)
    # the data replicated on every frequency row, shape (N x M)
    data_rows = np.matrix(np.tile(dvec, nfreq).reshape(nfreq, len(dvec)))
    # window function: row means of the phasors, shape (N x 1)
    wfn = np.sum(phasors, axis=1) / ntime
    # DFT: row means of data * phasors; 'half' keeps only the first N/2 rows
    nrows = int(nfreq / 2) if kind == 'half' else nfreq
    dft = np.sum(np.array(data_rows)[:nrows] * np.array(phasors)[:nrows],
                 axis=1) / ntime
    return wfn, dft
def dft_nfor_ne(freq, tvec, dvec, kind='half'):
    """
    Calculate the Discrete Fourier transform (slow scales with N^2) without
    a frequency loop, using numexpr for the heavy element-wise expressions.

    The DFT is normalised to have the mean value of the data at zero frequency

    :param freq: numpy array, frequency grid calculated from the time vector
    :param tvec: numpy array or list, input time(independent) vector, normalised
                 by the mean of the time vector
    :param dvec: numpy array or list, input dependent vector, normalised by the
                 mean of the data vector
    :param kind: string, if 'half' uses only the largest half of the frequencies
                 if 'full' will return all frequencies
    :return wfn: numpy array of complex numbers, spectral window function
    :return dft: numpy array of complex numbers, "dirty" discrete Fourier
                 transform
    """
    # -------------------------------------------------------------------------
    # Code starts here
    # -------------------------------------------------------------------------
    # wfn = np.zeros(len(freq), dtype=complex)
    # dft = np.zeros(int(len(freq)/2), dtype=complex)
    # make vectors in to matrices
    fmatrix = np.matrix(freq)    # shape (N x 1)
    tmatrix = np.matrix(tvec)    # shape (M x 1)
    # need dvec to be tiled len(freq) times (to enable multiplication)
    d_arr = np.tile(dvec, len(freq)).reshape(len(freq), len(dvec))
    dmatrix = np.matrix(d_arr)     # shape (N x M)
    # work out the phase (numexpr reads these local names)
    ftmatrix = fmatrix.T
    twopi = 2*np.pi
    # phase = -2*np.pi*fmatrix.T*tmatrix    # shape (N x M)
    phase = ne('-twopi*ftmatrix*tmatrix')
    # work out the phase vector
    # phvec = np.cos(phase) + 1j*np.sin(phase)   # shape (N x M)
    phvec = ne('cos(phase) + 1j*sin(phase)')
    # window function: mean of the phase factors over time, per frequency
    wfn = np.sum(phvec, axis=1)/len(tvec)    # shape (N x 1)
    # only for the first freq/2 indices
    if kind == 'half':
        Nfreq = int(len(freq)/2)
        darray = np.array(dmatrix[: Nfreq])
        phvecarray = np.array(phvec)[: Nfreq]
    else:
        darray = np.array(dmatrix)
        phvecarray = np.array(phvec)
    # dft = np.sum(np.array(dmatrix)[: Nfreq] * np.array(phvec)[: Nfreq],
    #              axis=1)/len(tvec)    # shape (N/2 x 1)
    multiply = ne('darray * phvecarray')
    dft = np.sum(multiply, axis=1)/len(tvec)
    return wfn, dft
def dft_l(freq, tvec, dvec, log=False, maxsize=None, kind='half'):
    """
    Calculate the Discrete Fourier transform (slow scales with N^2)

    The DFT is normalised to have the mean value of the data at zero frequency

    Wraps dft_nfor, splitting the frequency grid into chunks of at most
    `maxsize` rows to bound memory use.

    :param freq: numpy array, frequency grid calculated from the time vector
    :param tvec: numpy array or list, input time(independent) vector, normalised
                 by the mean of the time vector
    :param dvec: numpy array or list, input dependent vector, normalised by the
                 mean of the data vector
    :param log: boolean, if True prints progress to standard output
                if False silent
    :param maxsize: int, maximum number of frequency rows to processes,
                    default is 10,000 but large tvec/dvec array will use
                    a large amount of RAM (64*len(tvec)*maxsize bits of data)
                    If the program is using too much RAM, reduce "maxsize" or
                    bin up tvec/dvec
    :param kind: string, if 'half' uses only the largest half of the frequencies
                 if 'full' will return all frequencies
    :return wfn: numpy array of complex numbers, spectral window function
    :return dft: numpy array of complex numbers, "dirty" discrete Fourier
                 transform
    """
    if maxsize is None:
        maxsize = MAX_SIZE
    if len(freq) < maxsize:
        wfn, dft = dft_nfor(freq, tvec, dvec, kind)
    # Need to cut up frequencies into managable chunks (with a for loop)
    else:
        # NOTE(review): with kind='half', each chunk keeps only the first
        # half of its *own* frequencies, not of the full grid — confirm
        # this is the intended behaviour for the chunked path.
        chunks = int(np.ceil(len(freq)/maxsize))
        wfn, dft = [], []
        for chunk in __tqdmlog__(range(chunks), log):
            # break frequency into bits
            freqi = freq[chunk*maxsize: (chunk+1)*maxsize]
            # get wfni and dfti for this chunk
            wfni, dfti = dft_nfor(freqi, tvec, dvec, kind)
            # append to list (np.append flattens into 1-D arrays)
            wfn = np.append(wfn, np.array(wfni))
            dft = np.append(dft, np.array(dfti))
            # clean up
            del freqi, wfni, dfti
        # convert to numpy array
        wfn, dft = np.array(wfn), np.array(dft)
    return wfn, dft
def dft_l_ne(freq, tvec, dvec, log=False, maxsize=None, kind='half'):
    """
    Calculate the Discrete Fourier transform (slow scales with N^2)

    The DFT is normalised to have the mean value of the data at zero frequency

    Wraps dft_nfor_ne, splitting the frequency grid into chunks of at most
    `maxsize` rows to bound memory use.

    :param freq: numpy array, frequency grid calculated from the time vector
    :param tvec: numpy array or list, input time(independent) vector, normalised
                 by the mean of the time vector
    :param dvec: numpy array or list, input dependent vector, normalised by the
                 mean of the data vector
    :param log: boolean, if True prints progress to standard output
                if False silent
    :param maxsize: int, maximum number of frequency rows to processes,
                    default is 10,000 but large tvec/dvec array will use
                    a large amount of RAM (64*len(tvec)*maxsize bits of data)
                    If the program is using too much RAM, reduce "maxsize" or
                    bin up tvec/dvec
    :param kind: string, if 'half' uses only the largest half of the frequencies
                 if 'full' will return all frequencies
    :return wfn: numpy array of complex numbers, spectral window function
    :return dft: numpy array of complex numbers, "dirty" discrete Fourier
                 transform
    """
    if maxsize is None:
        maxsize = MAX_SIZE
    if len(freq) < maxsize:
        # BUG FIX: `kind` was previously dropped on this path, so the
        # small-grid case always used the default 'half' (dft_l passes it
        # through correctly); now forwarded for consistency
        wfn, dft = dft_nfor_ne(freq, tvec, dvec, kind=kind)
    # Need to cut up frequencies into managable chunks (with a for loop)
    else:
        chunks = int(np.ceil(len(freq)/maxsize))
        wfn, dft = [], []
        for chunk in __tqdmlog__(range(chunks), log):
            # break frequency into bits
            freqi = freq[chunk*maxsize: (chunk+1)*maxsize]
            # get wfni and dfti for this chunk
            wfni, dfti = dft_nfor_ne(freqi, tvec, dvec, kind=kind)
            # append to list
            wfn = np.append(wfn, np.array(wfni))
            dft = np.append(dft, np.array(dfti))
            # clean up
            del freqi, wfni, dfti
        # convert to numpy array
        wfn, dft = np.array(wfn), np.array(dft)
    return wfn, dft
def __tqdmlog__(x_input, log):
    """
    Private helper for progress logging.

    :param x_input: any iterable object
    :param log: bool, if True (and the tqdm module is installed) wrap the
                iterable in a tqdm progress bar, otherwise return it as-is
    :return: the (possibly wrapped) iterable
    """
    # fall back to an identity wrapper when tqdm is not installed
    try:
        from tqdm import tqdm
    except ModuleNotFoundError:
        def tqdm(x):
            return x
    return tqdm(x_input) if log else x_input
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
    import time as ttime
    # Load data
    print('\n Loading data...')
    fitsrec = fits.getdata(TESTPATH, ext=1)
    time_arr = np.array(fitsrec['time'], dtype=np.float64)
    data_arr = np.array(fitsrec['flux'], dtype=np.float64)
    # define freqs
    times1, times2, times3, times4 = [], [], [], []
    lengths = []
    values = [1e0, 5e0, 1e1, 5e1, 1e2, 5e2, 1e3, 5e3, 1e4]
    # values = [1e3]
    for val in values:
        interval = 2.5/val
        freqs = np.arange(0, 200.5 + interval, interval)
        lengths.append(len(freqs))
        # ----------------------------------------------------------------------
        print('\n\t Slow DFT...')
        start1 = ttime.time()
        wfn1, dft1 = dft_o(freqs, time_arr, data_arr)
        end1 = ttime.time()
        print('len(t)={0} len(f)={1}'.format(len(time_arr), len(freqs)))
        print('Time taken = {0}'.format(end1 - start1))
        times1.append(end1 - start1)
        # # ----------------------------------------------------------------------
        print('\n\t DFT with numexpr...')
        start2 = ttime.time()
        # BUG FIX: this step previously timed dft_l (the same function as the
        # next step), so the "for loop with numexpr" curve duplicated the
        # "no for loop" curve; it now times dft_ne as labelled
        wfn2, dft2 = dft_ne(freqs, time_arr, data_arr)
        end2 = ttime.time()
        print('len(t)={0} len(f)={1}'.format(len(time_arr), len(freqs)))
        print('Time taken = {0}'.format(end2 - start2))
        times2.append(end2 - start2)
        # # ----------------------------------------------------------------------
        print('\n\t DFT without for loop...')
        start3 = ttime.time()
        wfn3, dft3 = dft_l(freqs, time_arr, data_arr)
        end3 = ttime.time()
        print('len(t)={0} len(f)={1}'.format(len(time_arr), len(freqs)))
        print('Time taken = {0}'.format(end3 - start3))
        times3.append(end3 - start3)
        # ----------------------------------------------------------------------
        print('\n\t DFT without for loop + numexpr...')
        start4 = ttime.time()
        wfn4, dft4 = dft_l_ne(freqs, time_arr, data_arr)
        end4 = ttime.time()
        print('len(t)={0} len(f)={1}'.format(len(time_arr), len(freqs)))
        print('Time taken = {0}'.format(end4 - start4))
        times4.append(end4 - start4)
    # ----------------------------------------------------------------------
    # plot run time against frequency-grid length for each implementation
    import matplotlib.pyplot as plt
    plt.plot(lengths, times1, color='k', label='for loop')
    plt.plot(lengths, times2, color='b', label='for loop with numexpr')
    plt.plot(lengths, times3, color='r', label='no for loop')
    plt.plot(lengths, times4, color='purple', label='no for loop with numexpr')
    xmin, xmax, ymin, ymax = plt.gca().axis()
    plt.xscale('log')
    plt.legend(loc=0)
    plt.xlabel('Values of iteration')
    plt.ylabel('Time taken / s')
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    plt.show()
    plt.close()
# =============================================================================
# End of code
# =============================================================================
# lprun commands
# %load_ext line_profiler
# %lprun -s -f dft_nfor -T lp_results.txt dft_nfor(freq, tvec, dvec)
# %lprun -s -f DFT -T lp_results1.txt DFT(freq, tvec, dvec) | mit |
satriaphd/bgc-learn | core/utils.py | 1 | 8964 | import sys
import json
import logging
import os
import subprocess
from os import path
from tempfile import TemporaryFile

import straight.plugin

from core import log
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from Bio import SearchIO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from helperlibs.wrappers.io import TemporaryDirectory
from sklearn.externals import joblib
from time import gmtime, strftime
def get_version():
    """Return the bgc-learn version string."""
    return "0.0.0alpha"
def load_plugins(module):
    """Load every plugin under modules.<module>, sorted ascending by each
    plugin's `priority` attribute (Python 2 `cmp`-based sort)."""
    plugins = list(straight.plugin.load("modules.%s" % module))
    plugins.sort(cmp=lambda x, y: cmp(x.priority, y.priority))
    return plugins
# Registry of the available learning algorithms, grouped by task type.
# Each entry records the plugin's name and human-readable description.
available_algorithms = {
    "classification": [{"name": plugin.name, "description": plugin.description} for plugin in load_plugins("classification")],
    "regression": [{"name": plugin.name, "description": plugin.description} for plugin in load_plugins("regression")]
}
# Registry of the available feature-extraction plugins.
available_features = [{"name": plugin.name, "description": plugin.description} for plugin in load_plugins("feature_extraction")]
def get_algo_type(algo):
    """Return the task type ('classification' or 'regression') registered
    for an algorithm name, or '' when the name is unknown.

    When a name appears under both types, 'regression' wins (it is
    checked last), matching the original lookup order.
    """
    result = ""
    for algo_type in ("classification", "regression"):
        registered = [plugin["name"] for plugin in available_algorithms[algo_type]]
        if algo in registered:
            result = algo_type
    return result
def check_unsupported_features(feature_list):
    """Return the requested feature names that no plugin provides
    (duplicates removed; order not guaranteed)."""
    known = set(plugin["name"] for plugin in available_features)
    mapped_features = [name for name in feature_list if name in known]
    return list(set(feature_list) - set(mapped_features))
def save_bgcjson(bgc, output_folder):
    """Serialise a BGC dict to <output_folder>/bgcjson/<id>.bgcjson as
    pretty-printed JSON.

    bgc: dict with at least an "id" key
    output_folder: base output directory
    """
    save_folder = path.join(output_folder, "bgcjson")
    if not path.exists(save_folder):
        # NOTE(review): when the path does not exist, isdir() is always
        # False, so the error branch below is unreachable; an already
        # existing folder is reused silently.
        if not path.isdir(save_folder):
            os.makedirs(save_folder)
        else:
            log.error("Failed to save bgcjson, folder '%s' is unwritable" % save_folder)
            sys.exit(1)
    with open(path.join(save_folder, "%s.bgcjson" % bgc["id"]), "w") as bgcjson:
        bgcjson.write(json.dumps(bgc, indent=4, separators=(',', ': ')))
def save_result_model(classifier, metadata, output_folder):
    """Persist a trained classifier and its metadata under
    <output_folder>/results/model-<algorithm>-<timestamp>/.

    classifier: fitted scikit-learn estimator (saved with joblib)
    metadata: dict, must contain metadata["algorithm"]["name"]
    output_folder: base output directory

    returns: str, the name of the created model folder
    """
    result_folder = path.join(output_folder, "results")
    if not path.exists(result_folder):
        # NOTE(review): isdir() is always False for a non-existent path,
        # so the error branch is unreachable (same pattern as save_bgcjson).
        if not path.isdir(result_folder):
            os.makedirs(result_folder)
        else:
            log.error("Failed to create result folder")
            sys.exit(1)
    # folder name encodes the algorithm and a UTC timestamp
    folder_name = "model-%s-%s" % (metadata["algorithm"]["name"], strftime("%Y%m%d-%H%M%S", gmtime()))
    model_folder = path.join(result_folder, folder_name)
    if not path.exists(model_folder):
        os.makedirs(model_folder)
        joblib.dump(classifier, path.join(model_folder, "classifier.pkl"))
        with open(path.join(model_folder, "metadata.txt"), "w") as metafile:
            metafile.write(json.dumps(metadata, indent=4, separators=(',', ': ')))
    else:
        log.error("Cannot write into '%s', folder exist." % (model_folder))
        sys.exit(1)
    return folder_name
def save_result_testing(folder_name, rows, columns, X, y, predicted, output_folder):
    """Write the scores and per-sample predictions of a model test run to
    <output_folder>/results/<folder_name>/result.txt.

    folder_name: str, name of the result subfolder to create
    rows: list of str, one name per tested sample
    columns: list of str, feature column names
    X: 2D sequence of feature values, one row per sample
    y: sequence of expected labels/values
    predicted: tuple (dict of score name -> value, sequence of predictions)
    output_folder: str, base output directory

    returns: str, the folder name that was written
    """
    result_folder = path.join(output_folder, "results")
    if not path.exists(result_folder):
        if not path.isdir(result_folder):
            os.makedirs(result_folder)
        else:
            log.error("Failed to create result folder")
            sys.exit(1)
    save_folder = path.join(result_folder, folder_name)
    if not path.exists(save_folder):
        os.makedirs(save_folder)
        with open(path.join(save_folder, "result.txt"), "w") as resfile:
            log.info("%s scores:" % folder_name)
            # write the score metrics first...
            for key, value in predicted[0].iteritems():
                log.info("%s=%s" % (key, str(value)))
                resfile.write("%s=%s\n" % (key, str(value)))
            # ...then one tab-separated prediction row per sample
            resfile.write("Predictions:\n")
            resfile.write("row_name\t%s\texpected\tpredicted\n" % ("\t".join(columns)))
            for i, fname in enumerate(rows):
                resfile.write("%s\t%s\t%s\t%s\n" % (fname, "\t".join(["%.8f" % val for val in X[i]]), str(y[i]), str(predicted[1][i])))
    else:
        # BUG FIX: this branch referenced the undefined name `model_folder`
        # (copied from save_result_model), raising NameError instead of
        # reporting the folder clash
        log.error("Cannot write into '%s', folder exist." % (save_folder))
        sys.exit(1)
    return folder_name
def create_feature_folder(input_files, output_folder, overwrite=True):
    """Create <output_folder>/features/ and write index.txt mapping a
    running index to each input file's BGC name.

    input_files: sequence of input file paths
    output_folder: base output directory
    overwrite: bool, if True rewrite index.txt even when it exists

    returns: str, path of the features folder
    """
    save_folder = path.join(output_folder, "features")
    save_file = path.join(save_folder, "index.txt")
    if not path.exists(save_folder):
        # NOTE(review): isdir() is always False for a non-existent path,
        # so the error branch is unreachable (same pattern as save_bgcjson).
        if not path.isdir(save_folder):
            os.makedirs(save_folder)
        else:
            log.error("Failed to create feature folder, folder '%s' is unwritable" % save_folder)
            sys.exit(1)
    if (not path.exists(save_file)) or overwrite:
        with open(save_file, "w") as f:
            # each line is "<index>:<bgc name>"
            for i, val in enumerate(input_files):
                f.write("%d:%s\n" % (i, get_bgc_name(val)))
    return save_folder
def get_bgc_name(file_path):
    """Return the BGC name for a file: its base name without the final
    extension."""
    base = path.basename(file_path)
    stem, _extension = path.splitext(base)
    return stem
# ported from https://github.com/antismash/antismash
# pylint: disable=redefined-builtin
def execute(commands, input=None):
    """Execute an external command in a system-independent manner.

    commands: list of strings, the command and its arguments
    input: optional data fed to the process' stdin

    returns: tuple (stdout, stderr, returncode)
    raises: OSError when the command cannot be started
    """
    # only open a stdin pipe when there is something to feed it
    if input is not None:
        stdin_redir = subprocess.PIPE
    else:
        stdin_redir = None
    try:
        proc = subprocess.Popen(commands, stdin=stdin_redir,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate(input=input)
        retcode = proc.returncode
        return out, err, retcode
    except OSError as e:
        # BUG FIX: `logging` was never imported (now added at the top of the
        # file), so this handler raised NameError instead of logging; also
        # replaced the Python-2-only `except OSError, e` with `as e`, which
        # is valid on Python 2.6+ and 3.
        logging.debug("%r %r returned %r", commands, input[:40] if input is not None else None, e)
        raise
# pylint: enable=redefined-builtin
def run_hmmscan(query_sequence, opts=None):
    """Run hmmscan (--cut_tc) on a FASTA-formatted query against the local
    Pfam-A database and parse both output formats.

    query_sequence: str, FASTA text written to a temporary file
    opts: optional list of extra command-line arguments for hmmscan

    returns: dict with keys "data" (parsed hmmer3 text results),
             "accessions" and "lengths" (per hit id) — or an empty list
             when hmmscan cannot be started.
             NOTE(review): callers must handle both the dict and the
             empty-list return shapes.
    """
    # TODO: check if hmmscan available
    # check if Pfam-A.hmm exists
    pfam_folder = path.abspath(path.join(path.realpath(__file__), path.pardir, path.pardir, "resources", "pfamdb"))
    if not path.exists(pfam_folder):
        os.makedirs(pfam_folder)
    else:
        if not path.isdir(pfam_folder):
            log.error("Failed to do hmmscan, folder '%s' is unwritable" % pfam_folder)
            sys.exit(1)
    # the .h3m index is produced by hmmpress; its absence means the DB
    # was never downloaded/prepared
    if not path.exists(path.join(pfam_folder, "Pfam-A.hmm.h3m")):
        log.error("Pfam-A database not downloaded, please run download_resources.py first")
        sys.exit(1)
    results = []
    with TemporaryDirectory() as temp_dir:
        domtblout = path.abspath(path.join(temp_dir, "domtblout"))
        textout = path.abspath(path.join(temp_dir, "textout"))
        command = ["hmmscan", "--cut_tc", "--domtblout", domtblout, "-o", textout]
        temp_fasta = path.abspath(path.join(temp_dir, "temp_fasta"))
        with open(temp_fasta, "w") as tf:
            tf.write(query_sequence)
        if opts is not None:
            command.extend(opts)
        command.extend([path.join(pfam_folder, "Pfam-A.hmm"), temp_fasta])
        try:
            out, err, retcode = execute(command)
        except OSError:
            # hmmscan binary not found / not startable
            return []
        if retcode != 0:
            log.error('Hmmscan returned %d: %r while scanning %r' % (retcode, err, query_sequence))
            sys.exit(1)
        results = {
            "data": list(SearchIO.parse(textout, 'hmmer3-text')),
            "accessions": {},
            "lengths": {},
        }
        # record accession and model length once per hit id
        for scan in list(SearchIO.parse(domtblout, 'hmmscan3-domtab')):
            for hit in scan.hits:
                if hit.id not in results["accessions"]:
                    results["accessions"][hit.id] = hit.accession
                if hit.id not in results["lengths"]:
                    results["lengths"][hit.id] = hit.seq_len
    return results
def align_hsp_to_model(hsp, model_length):  # this works only for hmmscan model (cds as query)
    """Project an HSP onto its HMM model: the hit sequence (gaps removed)
    padded with '.' for the model positions outside [hit_start, hit_end);
    the pads are swapped when hit and query strands differ."""
    pad_left = "." * hsp.hit_start
    pad_right = "." * (model_length - hsp.hit_end)
    if hsp.hit_strand != hsp.query_strand:
        pad_left, pad_right = pad_right, pad_left
    return "%s%s%s" % (pad_left, str(hsp.hit.seq).replace(".", ""), pad_right)
# -*- coding: utf-8 -*-
# Print iterations progress
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=50):
    """Render an in-place terminal progress bar on stdout.

    iteration: int, current iteration (a newline is emitted when it
               reaches `total`)
    total: int, total number of iterations
    prefix: str, text printed before the bar
    suffix: str, text printed after the counters
    decimals: int, number of decimals shown in the percentage
    bar_length: int, character width of the bar
    """
    str_format = "{0:." + str(decimals) + "f}"
    percents = str_format.format(100 * (iteration / float(total)))
    filled_length = int(round(bar_length * iteration / float(total)))
    bar = '*' * filled_length + '-' * (bar_length - filled_length)
    # BUG FIX: `bar` was built but never written; include it in the output
    sys.stdout.write('\r%s |%s| %s%s (%i/%i) %s' % (prefix, bar, percents, '%', iteration, total, suffix))
    if iteration == total:
        sys.stdout.write('\n')
    sys.stdout.flush()
| gpl-3.0 |
qiime2/q2-types | q2_types/feature_data/_transformer.py | 1 | 20226 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections.abc
from itertools import zip_longest
import pandas as pd
import biom
import skbio
import qiime2
from ..plugin_setup import plugin
from ..feature_table import BIOMV210Format
from . import (TaxonomyFormat, HeaderlessTSVTaxonomyFormat, TSVTaxonomyFormat,
DNAFASTAFormat, PairedDNASequencesDirectoryFormat,
AlignedDNAFASTAFormat, DifferentialFormat, ProteinFASTAFormat,
AlignedProteinFASTAFormat, RNAFASTAFormat,
AlignedRNAFASTAFormat, PairedRNASequencesDirectoryFormat
)
# Taxonomy format transformers
def _taxonomy_formats_to_dataframe(filepath, has_header=None):
    """Read any of the three taxonomy formats into a dataframe.

    Parameters
    ----------
    filepath : str
        The taxonomy-formatted file to be read.
    has_header : bool, optional
        If `None`, autodetect the header: only `Feature ID<tab>Taxon` is
        recognized, optionally followed by other columns. If `True`, the file
        must have the expected header described above otherwise an error is
        raised. If `False`, the file is read without assuming a header.

    Returns
    -------
    pd.DataFrame
        Dataframe containing parsed contents of the taxonomy file. The
        dataframe will have its index name set to `Feature ID` and its first
        column will be `Taxon`, followed by any other columns in the input
        file.

    Raises
    ------
    ValueError
        On fewer than two columns, a required-but-missing header, no data
        rows, or duplicated feature IDs / column names.
    """
    # Using `dtype=object` and `set_index()` to avoid type casting/inference of
    # any columns or the index.
    df = pd.read_csv(filepath, sep='\t', skip_blank_lines=True,
                     header=None, dtype=object)
    if len(df.columns) < 2:
        raise ValueError(
            "Taxonomy format requires at least two columns, found %d."
            % len(df.columns))
    if has_header and not _has_expected_header(df):
        raise ValueError(
            "Taxonomy format requires a header with `Feature ID` and `Taxon` "
            "as the first two columns.")
    if has_header or (has_header is None and _has_expected_header(df)):
        # Make first row the header:
        # https://stackoverflow.com/a/26147330/3776794
        df.columns = df.iloc[0]
        df.columns.name = None
        df = df.reindex(df.index.drop(0))
    else:
        # No header: synthesize the canonical names plus placeholders for
        # any extra columns.
        unnamed_columns = ['Unnamed Column %d' % (i + 1)
                           for i in range(len(df.columns[2:]))]
        df.columns = TSVTaxonomyFormat.HEADER + unnamed_columns
    df.set_index(df.columns[0], drop=True, append=False, inplace=True)
    if len(df.index) < 1:
        raise ValueError("Taxonomy format requires at least one row of data.")
    if df.index.has_duplicates:
        raise ValueError(
            "Taxonomy format feature IDs must be unique. The following IDs "
            "are duplicated: %s" %
            ', '.join(df.index[df.index.duplicated()].unique()))
    if df.columns.has_duplicates:
        raise ValueError(
            "Taxonomy format column names must be unique. The following "
            "column names are duplicated: %s" %
            ', '.join(df.columns[df.columns.duplicated()].unique()))
    # normalise surrounding whitespace in the taxonomy strings
    df['Taxon'] = df['Taxon'].str.strip()
    return df
def _has_expected_header(df):
    """Whether the dataframe's first row begins with the expected
    `Feature ID` / `Taxon` header."""
    first_two_cells = df.iloc[0].tolist()[:2]
    return first_two_cells == TSVTaxonomyFormat.HEADER
def _dataframe_to_tsv_taxonomy_format(df):
    """Validate *df* and serialize it into a new ``TSVTaxonomyFormat``.

    The dataframe must have its index named ``Feature ID``, a first column
    named ``Taxon``, at least one row, and no duplicated feature IDs or
    column names. Raises ``ValueError`` on any violation; the checks run in
    a fixed order so callers see deterministic error messages.
    """
    if len(df.index) < 1:
        raise ValueError("Taxonomy format requires at least one row of data.")
    if len(df.columns) < 1:
        raise ValueError(
            "Taxonomy format requires at least one column of data.")
    if df.index.name != 'Feature ID':
        raise ValueError(
            "Taxonomy format requires the dataframe index name to be "
            "`Feature ID`, found %r" % df.index.name)
    if df.columns[0] != 'Taxon':
        raise ValueError(
            "Taxonomy format requires the first column name to be `Taxon`, "
            "found %r" % df.columns[0])
    if df.index.has_duplicates:
        duplicated_ids = df.index[df.index.duplicated()].unique()
        raise ValueError(
            "Taxonomy format feature IDs must be unique. The following IDs "
            "are duplicated: %s" % ', '.join(duplicated_ids))
    if df.columns.has_duplicates:
        duplicated_cols = df.columns[df.columns.duplicated()].unique()
        raise ValueError(
            "Taxonomy format column names must be unique. The following "
            "column names are duplicated: %s" % ', '.join(duplicated_cols))
    result = TSVTaxonomyFormat()
    df.to_csv(str(result), sep='\t', header=True, index=True)
    return result
def _biom_to_tsv_taxonomy_format(table):
    """Convert the per-observation ``taxonomy`` metadata of a ``biom.Table``
    into a ``TSVTaxonomyFormat``.

    Each observation must carry a ``taxonomy`` entry formatted as a list of
    strings; the levels are joined with ``'; '``. Raises ``TypeError`` when
    the table has no observation metadata (or a malformed entry) and
    ``ValueError`` when an observation lacks the ``taxonomy`` key.
    """
    obs_metadata = table.metadata(axis='observation')
    obs_ids = table.ids(axis='observation')
    if obs_metadata is None:
        raise TypeError('Table must have observation metadata.')
    taxa = []
    for obs_id, md in zip(obs_ids, obs_metadata):
        if 'taxonomy' not in md:
            raise ValueError('Observation %s does not contain `taxonomy` '
                             'metadata.' % obs_id)
        try:
            taxa.append('; '.join(md['taxonomy']))
        except Exception as e:
            # join() fails when the entry is not an iterable of strings;
            # re-raise with the offending observation identified.
            raise TypeError('There was a problem preparing the taxonomy '
                            'data for Observation %s. Metadata should be '
                            'formatted as a list of strings; received %r.'
                            % (obs_id, type(md['taxonomy']))) from e
    series = pd.Series(taxa, index=obs_ids, name='Taxon')
    series.index.name = 'Feature ID'
    return _dataframe_to_tsv_taxonomy_format(series.to_frame())
# --- Taxonomy transformers -------------------------------------------------
# The numeric suffixes are registration IDs only; these functions are never
# called by name -- the plugin framework looks them up by their
# (input format, output view) type signature.
@plugin.register_transformer
def _4(ff: TaxonomyFormat) -> pd.DataFrame:
    # has_header=None: autodetect whether the file carries the expected header
    return _taxonomy_formats_to_dataframe(str(ff), has_header=None)
@plugin.register_transformer
def _6(ff: TaxonomyFormat) -> pd.Series:
    df = _taxonomy_formats_to_dataframe(str(ff), has_header=None)
    # first column is always `Taxon`
    return df.iloc[:, 0]
@plugin.register_transformer
def _28(ff: TaxonomyFormat) -> qiime2.Metadata:
    df = _taxonomy_formats_to_dataframe(str(ff), has_header=None)
    return qiime2.Metadata(df)
@plugin.register_transformer
def _20(ff: HeaderlessTSVTaxonomyFormat) -> TSVTaxonomyFormat:
    # Round-trip through a dataframe to attach the canonical header.
    return _dataframe_to_tsv_taxonomy_format(
        _taxonomy_formats_to_dataframe(str(ff), has_header=False))
@plugin.register_transformer
def _22(ff: TSVTaxonomyFormat) -> pd.DataFrame:
    # has_header=True: the header row is mandatory for this format
    return _taxonomy_formats_to_dataframe(str(ff), has_header=True)
@plugin.register_transformer
def _23(ff: TSVTaxonomyFormat) -> pd.Series:
    df = _taxonomy_formats_to_dataframe(str(ff), has_header=True)
    return df.iloc[:, 0]
@plugin.register_transformer
def _29(ff: TSVTaxonomyFormat) -> qiime2.Metadata:
    df = _taxonomy_formats_to_dataframe(str(ff), has_header=True)
    return qiime2.Metadata(df)
@plugin.register_transformer
def _24(df: pd.DataFrame) -> TSVTaxonomyFormat:
    return _dataframe_to_tsv_taxonomy_format(df)
@plugin.register_transformer
def _25(series: pd.Series) -> TSVTaxonomyFormat:
    return _dataframe_to_tsv_taxonomy_format(series.to_frame())
@plugin.register_transformer
def _26(data: biom.Table) -> TSVTaxonomyFormat:
    return _biom_to_tsv_taxonomy_format(data)
@plugin.register_transformer
def _27(ff: BIOMV210Format) -> TSVTaxonomyFormat:
    # not using q2_types.feature_table._transformer._parse_biom_table_v210
    # because it strips out metadata
    with ff.open() as fh:
        table = biom.Table.from_hdf5(fh)
    return _biom_to_tsv_taxonomy_format(table)
# common to all FASTA transformers
def _read_from_fasta(path, constructor=skbio.DNA):
    """Return a generator of *constructor* instances (e.g. ``skbio.DNA``)
    parsed from the FASTA file at *path*."""
    records = skbio.read(path, constructor=constructor, format='fasta')
    return records
def _fastaformats_to_series(ff, constructor=skbio.DNA):
    """Read a FASTA file format into a ``pd.Series`` of skbio sequences
    indexed by sequence ID.

    Raises ``ValueError`` if the same ID occurs more than once in the file.
    """
    sequences = {}
    for record in _read_from_fasta(str(ff), constructor):
        seq_id = record.metadata['id']
        if seq_id in sequences:
            raise ValueError("FASTA format sequence IDs must be unique. The "
                             "following ID was found more than once: %s."
                             % seq_id)
        sequences[seq_id] = record
    return pd.Series(sequences)
def _fastaformats_to_metadata(ff, constructor=skbio.DNA):
    """Read a FASTA file format into ``qiime2.Metadata`` with one
    ``Sequence`` column (string-typed) indexed by ``Feature ID``."""
    frame = _fastaformats_to_series(ff, constructor).to_frame()
    frame = frame.astype(str)
    frame.index.name = 'Feature ID'
    frame.columns = ['Sequence']
    return qiime2.Metadata(frame)
def _series_to_fasta_format(ff, data, sequence_type="DNA"):
    """Write a ``pd.Series`` of sequence strings to a FASTA file format.

    Parameters
    ----------
    ff : file format object
        Target format; its ``open()`` method must yield a writable handle.
    data : pd.Series
        Sequence strings indexed by sequence ID.
    sequence_type : str
        One of ``"DNA"``, ``"RNA"`` or ``"protein"``; selects the skbio
        constructor used to wrap each string.

    Raises
    ------
    NotImplementedError
        If `sequence_type` is not one of the supported values. (Raised up
        front now, instead of only when the first record is written.)
    """
    constructors = {
        "DNA": skbio.DNA,
        "RNA": skbio.RNA,
        "protein": skbio.Protein,
    }
    if sequence_type not in constructors:
        # Message previously omitted RNA even though RNA is supported.
        raise NotImplementedError(
            "pd.Series can only be converted to DNA, RNA or "
            "protein FASTA format.")
    constructor = constructors[sequence_type]
    with ff.open() as f:
        # `Series.iteritems()` was removed in pandas 2.0; `items()` is the
        # long-standing equivalent and works on all pandas versions.
        for id_, seq in data.items():
            sequence = constructor(seq, metadata={'id': id_})
            skbio.io.write(sequence, format='fasta', into=f)
# DNA FASTA transformers
class NucleicAcidIterator(collections.abc.Iterable):
    """Thin iterable wrapper around a generator of skbio sequences.

    Subclasses only differentiate the semantic type of the wrapped
    sequences; iteration delegates to the wrapped iterable.
    """
    def __init__(self, generator):
        # The wrapped iterable, exposed under the original attribute name.
        self.generator = generator
    def __iter__(self):
        for sequence in self.generator:
            yield sequence
class DNAIterator(NucleicAcidIterator):
    """Iterator over unaligned skbio.DNA sequences."""
    pass
class PairedDNAIterator(NucleicAcidIterator):
    """Iterator over (left, right) pairs of skbio.DNA sequences."""
    pass
class AlignedDNAIterator(NucleicAcidIterator):
    """Iterator over aligned (gapped) skbio.DNA sequences."""
    pass
class RNAIterator(NucleicAcidIterator):
    """Iterator over unaligned skbio.RNA sequences."""
    pass
class PairedRNAIterator(NucleicAcidIterator):
    """Iterator over (left, right) pairs of skbio.RNA sequences."""
    pass
class AlignedRNAIterator(NucleicAcidIterator):
    """Iterator over aligned (gapped) skbio.RNA sequences."""
    pass
# DNA Transformers
# Registered by (input, output) type signature; function names are IDs only.
@plugin.register_transformer
def _9(ff: DNAFASTAFormat) -> DNAIterator:
    # Lazily stream skbio.DNA records from the FASTA file.
    generator = _read_from_fasta(str(ff), skbio.DNA)
    return DNAIterator(generator)
@plugin.register_transformer
def _10(data: DNAIterator) -> DNAFASTAFormat:
    ff = DNAFASTAFormat()
    skbio.io.write(iter(data), format='fasta', into=str(ff))
    return ff
@plugin.register_transformer
def _11(df: PairedDNASequencesDirectoryFormat) -> PairedDNAIterator:
    left = df.left_dna_sequences.view(DNAIterator)
    right = df.right_dna_sequences.view(DNAIterator)
    def read_seqs():
        # zip_longest pads the shorter side with None so a length mismatch
        # between left and right reads can be reported explicitly.
        for lseq, rseq in zip_longest(left, right):
            if rseq is None:
                raise ValueError('more left sequences than right sequences')
            if lseq is None:
                raise ValueError('more right sequences than left sequences')
            if rseq.metadata['id'] != lseq.metadata['id']:
                raise ValueError(lseq.metadata['id'] + ' and ' +
                                 rseq.metadata['id'] + ' differ')
            yield lseq, rseq
    return PairedDNAIterator(read_seqs())
@plugin.register_transformer
def _12(data: PairedDNAIterator) -> PairedDNASequencesDirectoryFormat:
    df = PairedDNASequencesDirectoryFormat()
    ff_left = DNAFASTAFormat()
    ff_right = DNAFASTAFormat()
    with ff_left.open() as lfile, ff_right.open() as rfile:
        for lseq, rseq in data:
            # IDs must stay paired; a mismatch aborts the whole write.
            if rseq.metadata['id'] != lseq.metadata['id']:
                raise ValueError(lseq.metadata['id'] + ' and ' +
                                 rseq.metadata['id'] + ' differ')
            skbio.io.write(lseq, format='fasta', into=lfile)
            skbio.io.write(rseq, format='fasta', into=rfile)
    df.left_dna_sequences.write_data(ff_left, DNAFASTAFormat)
    df.right_dna_sequences.write_data(ff_right, DNAFASTAFormat)
    return df
@plugin.register_transformer
def _13(ff: AlignedDNAFASTAFormat) -> skbio.TabularMSA:
    return skbio.TabularMSA.read(str(ff), constructor=skbio.DNA,
                                 format='fasta')
@plugin.register_transformer
def _14(data: skbio.TabularMSA) -> AlignedDNAFASTAFormat:
    ff = AlignedDNAFASTAFormat()
    data.write(str(ff), format='fasta')
    return ff
@plugin.register_transformer
def _15(ff: DNAFASTAFormat) -> pd.Series:
    return _fastaformats_to_series(ff, skbio.DNA)
@plugin.register_transformer
def _31(ff: DNAFASTAFormat) -> qiime2.Metadata:
    return _fastaformats_to_metadata(ff, skbio.DNA)
@plugin.register_transformer
def _16(data: pd.Series) -> DNAFASTAFormat:
    ff = DNAFASTAFormat()
    _series_to_fasta_format(ff, data)
    return ff
@plugin.register_transformer
def _18(ff: AlignedDNAFASTAFormat) -> AlignedDNAIterator:
    generator = _read_from_fasta(str(ff), skbio.DNA)
    return AlignedDNAIterator(generator)
@plugin.register_transformer
def _19(data: AlignedDNAIterator) -> AlignedDNAFASTAFormat:
    ff = AlignedDNAFASTAFormat()
    skbio.io.write(iter(data), format='fasta', into=str(ff))
    return ff
@plugin.register_transformer
def _33(ff: AlignedDNAFASTAFormat) -> qiime2.Metadata:
    return _fastaformats_to_metadata(ff, skbio.DNA)
@plugin.register_transformer
def _34(ff: AlignedDNAFASTAFormat) -> pd.Series:
    return _fastaformats_to_series(ff, skbio.DNA)
@plugin.register_transformer
def _35(data: pd.Series) -> AlignedDNAFASTAFormat:
    ff = AlignedDNAFASTAFormat()
    _series_to_fasta_format(ff, data)
    return ff
@plugin.register_transformer
def _36(fmt: AlignedDNAFASTAFormat) -> DNAIterator:
    # Reads an alignment as plain (gapped) DNA sequences.
    generator = _read_from_fasta(str(fmt), skbio.DNA)
    return DNAIterator(generator)
# Protein FASTA transformers
class ProteinIterator(collections.abc.Iterable):
    """Iterable wrapper around a generator of skbio.Protein sequences."""
    # NOTE(review): intentionally duplicates NucleicAcidIterator rather than
    # subclassing it -- presumably to keep protein iterators outside the
    # nucleic-acid class hierarchy. Confirm before unifying.
    def __init__(self, generator):
        # the underlying iterable of skbio.Protein objects
        self.generator = generator
    def __iter__(self):
        yield from self.generator
class AlignedProteinIterator(ProteinIterator):
    """ProteinIterator over aligned (gapped) skbio.Protein sequences."""
    pass
# --- Protein transformers --------------------------------------------------
# Registered by (input, output) type signature; function names are IDs only.
@plugin.register_transformer
def _37(ff: ProteinFASTAFormat) -> ProteinIterator:
    # Lazily stream skbio.Protein records from the FASTA file.
    generator = _read_from_fasta(str(ff), skbio.Protein)
    return ProteinIterator(generator)
@plugin.register_transformer
def _38(data: ProteinIterator) -> ProteinFASTAFormat:
    ff = ProteinFASTAFormat()
    skbio.io.write(iter(data), format='fasta', into=str(ff))
    return ff
@plugin.register_transformer
def _39(ff: AlignedProteinFASTAFormat) -> skbio.TabularMSA:
    return skbio.TabularMSA.read(str(ff), constructor=skbio.Protein,
                                 format='fasta')
@plugin.register_transformer
def _40(data: skbio.TabularMSA) -> AlignedProteinFASTAFormat:
    ff = AlignedProteinFASTAFormat()
    data.write(str(ff), format='fasta')
    return ff
@plugin.register_transformer
def _41(ff: ProteinFASTAFormat) -> pd.Series:
    return _fastaformats_to_series(ff, skbio.Protein)
@plugin.register_transformer
def _42(ff: ProteinFASTAFormat) -> qiime2.Metadata:
    return _fastaformats_to_metadata(ff, skbio.Protein)
@plugin.register_transformer
def _43(data: pd.Series) -> ProteinFASTAFormat:
    ff = ProteinFASTAFormat()
    _series_to_fasta_format(ff, data, "protein")
    return ff
@plugin.register_transformer
def _44(ff: AlignedProteinFASTAFormat) -> AlignedProteinIterator:
    generator = _read_from_fasta(str(ff), skbio.Protein)
    return AlignedProteinIterator(generator)
@plugin.register_transformer
def _45(data: AlignedProteinIterator) -> AlignedProteinFASTAFormat:
    ff = AlignedProteinFASTAFormat()
    skbio.io.write(iter(data), format='fasta', into=str(ff))
    return ff
@plugin.register_transformer
def _46(ff: AlignedProteinFASTAFormat) -> qiime2.Metadata:
    return _fastaformats_to_metadata(ff, skbio.Protein)
@plugin.register_transformer
def _47(ff: AlignedProteinFASTAFormat) -> pd.Series:
    return _fastaformats_to_series(ff, skbio.Protein)
@plugin.register_transformer
def _48(data: pd.Series) -> AlignedProteinFASTAFormat:
    ff = AlignedProteinFASTAFormat()
    _series_to_fasta_format(ff, data, "protein")
    return ff
@plugin.register_transformer
def _49(fmt: AlignedProteinFASTAFormat) -> ProteinIterator:
    # Reads an alignment as plain (gapped) protein sequences.
    generator = _read_from_fasta(str(fmt), skbio.Protein)
    return ProteinIterator(generator)
# RNA Transformers
# Registered by (input, output) type signature; function names are IDs only.
@plugin.register_transformer
def _50(ff: RNAFASTAFormat) -> RNAIterator:
    # Lazily stream skbio.RNA records from the FASTA file.
    generator = _read_from_fasta(str(ff), constructor=skbio.RNA)
    return RNAIterator(generator)
@plugin.register_transformer
def _51(data: RNAIterator) -> RNAFASTAFormat:
    ff = RNAFASTAFormat()
    skbio.io.write(iter(data), format='fasta', into=str(ff))
    return ff
@plugin.register_transformer
def _52(ff: AlignedRNAFASTAFormat) -> skbio.TabularMSA:
    return skbio.TabularMSA.read(str(ff), constructor=skbio.RNA,
                                 format='fasta')
@plugin.register_transformer
def _53(data: skbio.TabularMSA) -> AlignedRNAFASTAFormat:
    ff = AlignedRNAFASTAFormat()
    data.write(str(ff), format='fasta')
    return ff
@plugin.register_transformer
def _54(ff: RNAFASTAFormat) -> pd.Series:
    return _fastaformats_to_series(ff, constructor=skbio.RNA)
@plugin.register_transformer
def _55(ff: RNAFASTAFormat) -> qiime2.Metadata:
    return _fastaformats_to_metadata(ff, constructor=skbio.RNA)
@plugin.register_transformer
def _56(data: pd.Series) -> RNAFASTAFormat:
    ff = RNAFASTAFormat()
    _series_to_fasta_format(ff, data, sequence_type="RNA")
    return ff
@plugin.register_transformer
def _57(ff: AlignedRNAFASTAFormat) -> AlignedRNAIterator:
    generator = _read_from_fasta(str(ff), constructor=skbio.RNA)
    return AlignedRNAIterator(generator)
@plugin.register_transformer
def _58(data: AlignedRNAIterator) -> AlignedRNAFASTAFormat:
    ff = AlignedRNAFASTAFormat()
    skbio.io.write(iter(data), format='fasta', into=str(ff))
    return ff
@plugin.register_transformer
def _59(ff: AlignedRNAFASTAFormat) -> qiime2.Metadata:
    return _fastaformats_to_metadata(ff, constructor=skbio.RNA)
@plugin.register_transformer
def _60(ff: AlignedRNAFASTAFormat) -> pd.Series:
    return _fastaformats_to_series(ff, constructor=skbio.RNA)
@plugin.register_transformer
def _61(data: pd.Series) -> AlignedRNAFASTAFormat:
    ff = AlignedRNAFASTAFormat()
    _series_to_fasta_format(ff, data, sequence_type="RNA")
    return ff
@plugin.register_transformer
def _62(fmt: AlignedRNAFASTAFormat) -> RNAIterator:
    # Reads an alignment as plain (gapped) RNA sequences.
    generator = _read_from_fasta(str(fmt), constructor=skbio.RNA)
    return RNAIterator(generator)
@plugin.register_transformer
def _63(df: PairedRNASequencesDirectoryFormat) -> PairedRNAIterator:
    left = df.left_rna_sequences.view(RNAIterator)
    right = df.right_rna_sequences.view(RNAIterator)
    def read_seqs():
        # zip_longest pads the shorter side with None so a length mismatch
        # between left and right reads can be reported explicitly.
        for lseq, rseq in zip_longest(left, right):
            if rseq is None:
                raise ValueError('more left sequences than right sequences')
            if lseq is None:
                raise ValueError('more right sequences than left sequences')
            if rseq.metadata['id'] != lseq.metadata['id']:
                raise ValueError(lseq.metadata['id'] + ' and ' +
                                 rseq.metadata['id'] + ' differ')
            yield lseq, rseq
    return PairedRNAIterator(read_seqs())
@plugin.register_transformer
def _64(data: PairedRNAIterator) -> PairedRNASequencesDirectoryFormat:
    df = PairedRNASequencesDirectoryFormat()
    ff_left = RNAFASTAFormat()
    ff_right = RNAFASTAFormat()
    with ff_left.open() as lfile, ff_right.open() as rfile:
        for lseq, rseq in data:
            # IDs must stay paired; a mismatch aborts the whole write.
            if rseq.metadata['id'] != lseq.metadata['id']:
                raise ValueError(lseq.metadata['id'] + ' and ' +
                                 rseq.metadata['id'] + ' differ')
            skbio.io.write(lseq, format='fasta', into=lfile)
            skbio.io.write(rseq, format='fasta', into=rfile)
    df.left_rna_sequences.write_data(ff_left, RNAFASTAFormat)
    df.right_rna_sequences.write_data(ff_right, RNAFASTAFormat)
    return df
# differential types
@plugin.register_transformer
def _222(ff: DifferentialFormat) -> pd.DataFrame:
    # DifferentialFormat is a Metadata-compatible TSV on disk.
    return qiime2.Metadata.load(str(ff)).to_dataframe()
@plugin.register_transformer
def _223(ff: DifferentialFormat) -> qiime2.Metadata:
    return qiime2.Metadata.load(str(ff))
@plugin.register_transformer
def _224(data: pd.DataFrame) -> DifferentialFormat:
    ff = DifferentialFormat()
    qiime2.Metadata(data).save(str(ff))
    return ff
| bsd-3-clause |
ntamas/yard | yard/curve.py | 1 | 32746 | """
Curve classes used in YARD.
This package contains implementations for all the curves YARD can plot.
At the time of writing, this includes:
- ROC curves (`ROCCurve`)
- CROC curves (`CROCCurve`)
- Precision-recall curves (`PrecisionRecallCurve`)
- Sensitivity-specificity plots (`SensitivitySpecificityCurve`)
- Accumulation curves (`AccumulationCurve`)
- F-score curves (`FScoreCurve`)
"""
__author__ = "Tamas Nepusz"
__email__ = "tamas@cs.rhul.ac.uk"
__copyright__ = "Copyright (c) 2010, Tamas Nepusz"
__license__ = "MIT"
from bisect import bisect
from yard.data import BinaryConfusionMatrix, BinaryClassifierData
from yard.transform import ExponentialTransformation
from yard.utils import axis_label, itersubclasses
class Curve(object):
    """Class representing an arbitrary curve on a 2D space.

    At this stage, a curve is nothing else but a series of points,
    kept sorted by their X coordinate.
    """

    def __init__(self, points):
        """Constructs a curve with the given points. `points` must be
        an iterable of 2-tuples containing the coordinates of the points.
        """
        self._points = None
        self.points = points

    def auc(self):
        """Returns the area under the curve.

        The area is calculated using a trapezoidal approximation to make the
        AUC of the `ROCCurve` class relate to the Gini coefficient (where
        G1 + 1 = 2 * AUC).
        """
        points = self.points
        return sum(
            (y0 + y1) / 2.0 * (x1 - x0)
            for (x0, y0), (x1, y1) in zip(points, points[1:])
        )

    def coarsen(self, **kwds):
        """Coarsens the curve in-place.

        This method is useful before plotting a curve that consists
        of many data points that are potentially close to each other.
        The method of coarsening is defined by the keyword arguments
        passed to this function.

        There are two different coarsening methods. The first
        method is invoked as ``coarsen(every=k)`` (where `k` is an
        integer) and it will keep every `k`th point from the curve.
        You can also call ``coarsen(until=k)`` which will keep on
        removing points from the curve (approximately evenly) until
        only `k` points remain. If there are less than `k` points
        initially, the curve will not be changed.

        In both cases the first and the last points are always kept.
        Raises ``TypeError`` unless exactly one of ``every=...`` and
        ``until=...`` is given.
        """
        if ("every" in kwds) == ("until" in kwds):
            # either both or neither were given -- exactly one is required
            raise TypeError("use either every=... or until=...")

        points = self.points
        if not points:
            return

        if "every" in kwds:
            k = int(kwds["every"])
            self._points = points[::k]
            # Keep the last point as well, but only when the slice above did
            # not already end on it -- otherwise it would be duplicated.
            # (The previous condition, ``len(points) % k != 0``, duplicated
            # the last point whenever ``len(points) % k == 1``.)
            if (len(points) - 1) % k != 0:
                self._points.append(points[-1])
            return

        k = int(kwds["until"])
        n = len(points)
        if n <= k:
            # Already at most k points; leave the curve untouched.
            return
        step = (n - 1) / (k - 1.0)
        # Keep the first point (idx == 0), k-2 approximately evenly spaced
        # interior points, and the last point. (The previous implementation
        # started at idx == 1 and therefore dropped the first point.)
        result = [points[int(idx * step)] for idx in range(k - 1)]
        result.append(points[-1])
        self._points = result

    def get_empty_figure(self, *args, **kwds):
        """Returns an empty `matplotlib.Figure` that can be used to show the
        curve. The arguments of this function are passed on intact to the
        constructor of `matplotlib.Figure`, except these (which are interpreted
        here):

            - `title`: the title of the figure.
            - `xlabel`: the label of the X axis.
            - `ylabel`: the label of the Y axis.

        These must be given as keyword arguments.
        """
        # Imported lazily so the rest of the class can be used without
        # matplotlib being installed.
        import matplotlib.pyplot as plt

        # Extract the keyword arguments handled here; everything else is
        # passed on to plt.figure() untouched.
        kwds_extra = dict(xlabel=None, ylabel=None, title=None)
        for name in kwds_extra.keys():
            if name in kwds:
                kwds_extra[name] = kwds.pop(name)

        # Construct the figure with a single axes and apply the
        # labels/title that were given.
        fig = plt.figure(*args, **kwds)
        axes = fig.add_subplot(111)
        for name, value in kwds_extra.items():
            if value is not None:
                getattr(axes, "set_%s" % name)(value)
        return fig

    def get_figure(self, *args, **kwds):
        """Returns a `matplotlib.Figure` that shows the curve.
        The arguments of this function are passed on intact to
        `get_empty_figure()`, except the following which are
        interpreted here:

            - `legend`: whether we want a legend on the figure or not.
              If ``False``, no legend will be shown. If ``True``,
              `matplotlib` will try to place the legend in an
              optimal position. If an integer or string, it will be
              interpreted as a location code by `matplotlib`.
        """
        legend = kwds.pop("legend", False)
        # Get an empty figure and its axes, and plot the curve on the axes
        fig = self.get_empty_figure(*args, **kwds)
        self.plot_on_axes(fig.get_axes()[0], legend=legend)
        return fig

    def get_interpolated_point(self, x):
        """Returns an interpolated point on this curve at the given
        X position.

        The default implementation uses linear interpolation from the
        nearest two points (and linear extrapolation from the first or
        last two points when `x` falls outside the curve).

        It is assumed that `self._points` is sorted in ascending order.
        If not, this function will produce wrong results.
        """
        points = self.points
        pos = bisect(points, (x, 0))

        # Do we have an exact match?
        try:
            if points[pos][0] == x:
                return points[pos]
        except IndexError:
            pass

        # Nope, so we have to interpolate (or extrapolate at the ends)
        if pos == 0:
            (x1, y1), (x2, y2) = points[:2]
        elif pos == len(points):
            (x1, y1), (x2, y2) = points[-2:]
        else:
            (x1, y1), (x2, y2) = points[pos - 1 : pos + 1]
        r = (x2 - x) / float(x2 - x1)
        return (x, y1 * r + y2 * (1 - r))

    def plot_on_axes(self, axes, style="r-", legend=True):
        """Plots the curve on the given `matplotlib.Axes` object.

        `style` specifies the style of the curve using ordinary
        ``matplotlib`` conventions. `legend` specifies the position
        where the legend should be added. ``False`` or ``None``
        means no legend. Returns the plotted ``Line2D`` object.
        """
        # Plot the points
        xs, ys = zip(*self.points)
        (curve,) = axes.plot(xs, ys, style)

        # Create the legend
        if legend is True:
            legend = 0  # matplotlib location code for "best"
        if legend is not None and legend is not False:
            # NOTE(review): self._data is only provided by subclasses such as
            # BinaryClassifierPerformanceCurve; plotting a bare Curve with a
            # legend would raise AttributeError here.
            label = self._data.title
            if label is not None:
                # Axes.legend() expects *sequences* of handles and labels;
                # the old code passed the bare Line2D object.
                axes.legend((curve,), (label,), loc=legend)
        return curve

    @property
    def points(self):
        """Returns the points of this curve as a list of 2-tuples.

        The returned list is the same as the list used internally in
        the instance. Don't modify it unless you know what you're doing.
        """
        return self._points

    @points.setter
    def points(self, points):
        """Sets the points of this curve. The method makes a copy of the
        given iterable and keeps the points sorted by X coordinate."""
        self._points = sorted(tuple(point) for point in points)

    def resample(self, new_xs):
        """Resamples the curve in-place at the given X positions.
        `new_xs` must be a list of positions on the X axis; interpolation
        will be used to calculate the corresponding Y values based on
        the nearest known values.
        """
        # Assign directly (no re-sorting) so the caller's sampling order
        # is preserved, matching the historical behaviour.
        self._points = [self.get_interpolated_point(x) for x in new_xs]

    def show(self, *args, **kwds):
        """Constructs and shows a `matplotlib.Figure` that plots the
        curve. If you need the figure itself for further manipulations,
        call `get_figure()` instead of this method.
        The arguments of this function are passed on intact to
        `get_figure()`.
        """
        self.get_figure(*args, **kwds).show()

    def transform(self, transformation):
        """Transforms the curve in-place by sending all the points to a given
        callable one by one. The given callable must expect two real numbers
        and return the transformed point as a tuple."""
        self.points = [transformation(*point) for point in self._points]

    def transform_x(self, transformation):
        """Transforms the X axis of the curve in-place by sending all the
        points to a given callable one by one. The given callable must expect
        a single real number and return the transformed value."""
        self.points = [(transformation(x), y) for x, y in self._points]

    def transform_y(self, transformation):
        """Transforms the Y axis of the curve in-place by sending all the
        points to a given callable one by one. The given callable must expect
        a single real number and return the transformed value."""
        # Route through the property setter (as transform_x does) for
        # consistency; a pure Y transformation preserves X order anyway.
        self.points = [(x, transformation(y)) for x, y in self._points]
class CurveFactory(object):
    """Factory that builds `Curve` instances from short identifiers.

    The command-line interface of `yard` refers to curve types by short
    string identifiers; this factory resolves those identifiers against the
    ``identifier`` class attribute of the `Curve` subclasses and
    instantiates the matching class.
    """

    @classmethod
    def construct_from_name(cls, name, *args, **kwds):
        """Constructs a curve from a short name used in command line
        arguments across the whole ``yard`` package.

        All arguments beyond `name` are forwarded intact to the constructor
        of the matched `Curve` subclass. Returns a new instance of that
        subclass, or raises ``ValueError`` for an unknown name.
        """
        curve_class = cls.find_class_by_name(name)
        return curve_class(*args, **kwds)

    @staticmethod
    def find_class_by_name(name):
        """Finds the `Curve` subclass whose ``identifier`` equals the given
        short name (matched case-insensitively).

        Returns the found subclass itself (not an instance), or raises
        ``ValueError`` if no subclass matches.
        """
        needle = name.lower()
        for subclass in itersubclasses(Curve):
            if getattr(subclass, "identifier", None) == needle:
                return subclass
        raise ValueError("no such curve type: %s" % needle)

    @staticmethod
    def get_curve_names():
        """Returns the sorted list of all registered curve identifiers."""
        names = [
            subclass.identifier
            for subclass in itersubclasses(Curve)
            if hasattr(subclass, "identifier")
        ]
        names.sort()
        return names
class BinaryClassifierPerformanceCurve(Curve):
    """Class representing a broad class of binary classifier performance
    curves.
    By using this class directly, you are free to specify what's on the X
    and Y axes of the plot. If you are interested in ROC curves, see
    `ROCCurve`, which is a subclass of this class. If you are interested
    in precision-recall curves, see `PrecisionRecallCurve`, which is also
    a subclass. Accumulation curves are implemented in `AccumulationCurve`,
    sensitivity-specificity plots are in `SensitivitySpecificityCurve`
    etc.
    """
    def __init__(self, data, x_func, y_func):
        """Constructs a binary classifier performance curve from the given
        dataset using the two given measures on the X and Y axes.
        The dataset must contain ``(x, y)`` pairs where `x` is a predicted
        value and `y` defines whether the example is positive or negative.
        When `y` is less than or equal to zero, it is considered a negative
        example, otherwise it is positive. ``False`` also means a negative
        and ``True`` also means a positive example. The dataset can also
        be an instance of :class:`BinaryClassifierData`.
        `x_func` and `y_func` must either be unbound method instances of
        the `BinaryConfusionMatrix` class, or functions that accept
        `BinaryConfusionMatrix` instances as their only arguments and
        return a number.
        """
        self._data = None
        self._points = None
        self.x_func = x_func
        self.y_func = y_func
        if not hasattr(self.x_func, "__call__"):
            raise TypeError("x_func must be callable")
        if not hasattr(self.y_func, "__call__"):
            raise TypeError("y_func must be callable")
        # NOTE: this assignment must come last -- the `data` setter calls
        # _calculate_points(), which reads x_func and y_func.
        self.data = data
    def _calculate_points(self):
        """Returns the actual points of the curve as a list of tuples."""
        # One confusion matrix per classification threshold; each matrix
        # becomes a single (x, y) point on the curve.
        x_func, y_func = self.x_func, self.y_func
        self.points = [
            (x_func(mat), y_func(mat))
            for _, mat in self._data.iter_confusion_matrices()
        ]
    @property
    def data(self):
        """Returns the data points from which we generate the curve"""
        return self._data
    @data.setter
    def data(self, data):
        """Sets the data points from which we generate the curve.
        Accepts either a `BinaryClassifierData` instance or any iterable its
        constructor accepts; the curve points are recalculated immediately."""
        if isinstance(data, BinaryClassifierData):
            self._data = data
        else:
            self._data = BinaryClassifierData(data)
        self._calculate_points()
    def get_empty_figure(self, *args, **kwds):
        """Returns an empty `matplotlib.Figure` that can be used
        to show the classifier curve. The arguments of this function are
        passed on intact to the constructor of `matplotlib.Figure`,
        except these (which are interpreted here):
            - `title`: the title of the figure.
            - `xlabel`: the label of the X axis. If omitted, we will
              try to infer it from `self.x_func`.
            - `ylabel`: the label of the Y axis. If omitted, we will
              try to infer it from `self.y_func`.
        These must be given as keyword arguments.
        Axis labels are inferred from the function objects that were
        used to obtain the points of the curve; in particular, this method
        is looking for an attribute named ``__axis_label__``, attached to
        the function objects. You can attach such an attribute easily
        by using `yard.utils.axis_label` as a decorator.
        """
        # Infer the labels of the X and Y axes
        def infer_label(func):
            # `__axis_label__` is attached by the yard.utils.axis_label
            # decorator; fall back to the function's own name otherwise.
            try:
                return getattr(func, "__axis_label__")
            except AttributeError:
                return func.__name__
        if "xlabel" not in kwds:
            kwds["xlabel"] = infer_label(self.x_func)
        if "ylabel" not in kwds:
            kwds["ylabel"] = infer_label(self.y_func)
        return super(BinaryClassifierPerformanceCurve, self).get_empty_figure(
            *args, **kwds
        )
    @classmethod
    def get_friendly_name(cls):
        """Returns a human-readable name of the curve that can be
        used in messages."""
        return cls.__name__
class ROCCurve(BinaryClassifierPerformanceCurve):
    """Class representing a ROC curve.
    A ROC curve plots the true positive rate on the Y axis versus
    the false positive rate on the X axis.
    """
    # short name used by CurveFactory / the command-line interface
    identifier = "roc"
    def __init__(self, data):
        """Constructs a ROC curve from the given dataset.
        The dataset must contain ``(x, y)`` pairs where `x` is a predicted
        value and `y` defines whether the example is positive or negative.
        When `y` is less than or equal to zero, it is considered a negative
        example, otherwise it is positive. ``False`` also means a negative
        and ``True`` also means a positive example. The dataset can also
        be an instance of `BinaryClassifierData`.
        """
        # X axis: false positive rate, Y axis: true positive rate
        super(ROCCurve, self).__init__(
            data, BinaryConfusionMatrix.fpr, BinaryConfusionMatrix.tpr
        )
    def auc(self):
        """Constructs the area under the ROC curve by a linear transformation
        of the rank sum of positive instances."""
        # Closed-form AUC (rank-sum based); avoids building curve points.
        pos_ranks = self.data.get_positive_ranks()
        return self.auc_from_pos_ranks(pos_ranks, len(self.data))
    @staticmethod
    def auc_from_pos_ranks(ranks, total):
        """Returns the AUC under a ROC curve, given the ranks of the positive
        examples and the total number of examples.
        This method can be used to calculate an AUC value quickly without
        constructing the curve itself if you have the positive ranks.
        """
        num_pos = len(ranks)
        num_neg = float(total - num_pos)
        # Reverse the rank order: (total + 1 - rank), summed over all ranks.
        sum_pos_ranks = (total + 1) * num_pos - sum(ranks)
        # Mann-Whitney (rank-sum) formulation of the AUC.
        # NOTE(review): this exact form assumes the rank convention produced
        # by BinaryClassifierData.get_positive_ranks() -- confirm against
        # yard.data before reusing with ranks computed elsewhere.
        return 1.0 - sum_pos_ranks / (num_pos * num_neg) + (num_pos + 1) / (2 * num_neg)
    def get_empty_figure(self, *args, **kwds):
        """Returns an empty `matplotlib.Figure` that can be used
        to show the ROC curve. The arguments of this function are
        passed on intact to the constructor of `matplotlib.Figure`,
        except these (which are interpreted here):
            - `title`: the title of the figure.
            - `xlabel`: the label of the X axis.
            - `ylabel`: the label of the Y axis.
            - `no_discrimination_line`: if ``True``, the no discrimination
              line will be drawn. If ``False``, it won't be drawn. If
              a string, it is interpreted as a line style by
              ``matplotlib`` and this line style will be used to draw
              the no discrimination line. If it is a tuple, the first
              element of the tuple will be interpreted as the color
              and the second will be interpreted as the line style
              by ``matplotlib``.
        These must be given as keyword arguments.
        """
        if "no_discrimination_line" in kwds:
            no_discrimination_line = kwds["no_discrimination_line"]
            del kwds["no_discrimination_line"]
        else:
            # default: dark grey dotted diagonal
            no_discrimination_line = ("#444444", ":")
        # Create the figure by calling the superclass
        fig = super(ROCCurve, self).get_empty_figure(*args, **kwds)
        axes = fig.get_axes()[0]
        # Plot the no-discrimination line (the y = x diagonal; a single
        # [0, 1] argument is interpreted by matplotlib as the Y values
        # against X = index, which yields the diagonal here)
        if no_discrimination_line:
            if isinstance(no_discrimination_line, (tuple, list)):
                color, linestyle = no_discrimination_line
                axes.plot([0, 1], color=color, linestyle=linestyle)
            else:
                axes.plot([0, 1], no_discrimination_line)
        return fig
    @classmethod
    def get_friendly_name(cls):
        """Returns a human-readable name of the curve that can be
        used in messages."""
        return "ROC curve"
class PrecisionRecallCurve(BinaryClassifierPerformanceCurve):
    """Class representing a precision-recall curve.
    A precision-recall curve plots precision on the Y axis versus
    recall on the X axis.
    """
    # short name used by CurveFactory / the command-line interface
    identifier = "pr"
    def __init__(self, data):
        """Constructs a precision-recall curve from the given dataset.
        The dataset must contain ``(x, y)`` pairs where `x` is a predicted
        value and `y` defines whether the example is positive or negative.
        When `y` is less than or equal to zero, it is considered a negative
        example, otherwise it is positive. ``False`` also means a negative
        and ``True`` also means a positive example. The dataset can also
        be an instance of `BinaryClassifierData`.
        """
        # X axis: recall, Y axis: precision
        super(PrecisionRecallCurve, self).__init__(
            data, BinaryConfusionMatrix.recall, BinaryConfusionMatrix.precision
        )
    @classmethod
    def get_friendly_name(cls):
        """Returns a human-readable name of the curve that can be
        used in messages."""
        return "precision-recall curve"
    def get_interpolated_point(self, x):
        """Returns an interpolated point on this curve at the given
        X position.
        This method performs the proper non-linear interpolation that
        is required for precision-recall curves. Basically, for each
        point, we find the two nearest known points, infer the original
        TP, FP and FN values at those points, and then we interpolate
        linearly in the space of TP-FP-FN values, while recalculating
        the precision and the recall at x.
        It is assumed that `self._points` is sorted in ascending order.
        If not, this function will produce wrong results.
        """
        points = self.points
        pos = bisect(points, (x, 0))
        # Do we have an exact match?
        try:
            if points[pos][0] == x:
                return points[pos]
        except IndexError:
            pass
        # Nope, so we have to interpolate
        if pos == 0:
            # Extrapolation is not possible, just return the
            # first element from points
            return points[0]
        elif pos == len(points):
            # Extrapolation is not possible, just return the
            # last element from points
            return points[-1]
        # Truly interpolating
        (x1, y1), (x2, y2) = points[pos - 1 : pos + 1]
        # The calculations (spelled out nicely) would be as follows:
        #
        # total_pos = self.data.total_positives
        # tp_left, tp_right = total_pos * x1, total_pos * x2
        # fp_left = tp_left * (1. - y1) / y1
        # fp_right = tp_right * (1. - y2) / y2
        # r = (tp_right-tp_mid)/float(tp_right-tp_left)
        # fp_mid = fp_left*r + fp_right*(1-r)
        # tp_mid = total_pos * x
        # recall_mid = tp_mid / (tp_mid + fp_mid)
        # return (x, recall_mid)
        #
        # Now, we recognise that we can divide almost everything with
        # total_pos, leading us to the following implementation:
        fp_left_over_total_pos = x1 * (1.0 - y1) / y1
        fp_right_over_total_pos = x2 * (1.0 - y2) / y2
        r = (x2 - x) / float(x2 - x1)
        fp_mid_over_total_pos = fp_left_over_total_pos * r + fp_right_over_total_pos * (
            1 - r
        )
        # NOTE(review): despite the derivation naming its result
        # `recall_mid`, the value returned here is the precision at x.
        return (x, x / (x + fp_mid_over_total_pos))
class SensitivitySpecificityCurve(BinaryClassifierPerformanceCurve):
    """A sensitivity-specificity plot.
    Plots the sensitivity on the Y axis against the specificity on
    the X axis.
    """
    identifier = "sespe"
    def __init__(self, data):
        """Creates a sensitivity-specificity curve for the given dataset.
        The dataset holds ``(x, y)`` pairs: `x` is a predicted value and
        `y` marks the example as positive or negative. A `y` that is less
        than or equal to zero (or ``False``) denotes a negative example;
        anything else (or ``True``) denotes a positive one. An instance
        of `BinaryClassifierData` is accepted as well.
        """
        x_axis_metric = BinaryConfusionMatrix.tnr
        y_axis_metric = BinaryConfusionMatrix.recall
        super(SensitivitySpecificityCurve, self).__init__(
            data, x_axis_metric, y_axis_metric
        )
    @classmethod
    def get_friendly_name(cls):
        """Human-readable name of this curve type, for use in messages."""
        return "sensitivity-specificity plot"
class AccumulationCurve(BinaryClassifierPerformanceCurve):
    """An accumulation curve.
    Plots the true positive rate on the Y axis against the fraction of
    the data classified as positive on the X axis.
    """
    identifier = "ac"
    def __init__(self, data):
        """Creates an accumulation curve for the given dataset.
        The dataset holds ``(x, y)`` pairs: `x` is a predicted value and
        `y` marks the example as positive or negative. A `y` that is less
        than or equal to zero (or ``False``) denotes a negative example;
        anything else (or ``True``) denotes a positive one. An instance
        of `BinaryClassifierData` is accepted as well.
        """
        x_axis_metric = BinaryConfusionMatrix.fdp
        y_axis_metric = BinaryConfusionMatrix.tpr
        super(AccumulationCurve, self).__init__(
            data, x_axis_metric, y_axis_metric
        )
    @classmethod
    def get_friendly_name(cls):
        """Human-readable name of this curve type, for use in messages."""
        return "accumulation curve"
class CROCCurve(BinaryClassifierPerformanceCurve):
    """Class representing a concentrated ROC curve.
    A CROC curve plots the true positive rate on the Y axis versus
    the false positive rate on the X axis, but it transforms the X axis
    in order to give more emphasis to the left hand side of the X axis
    (close to zero).
    """
    identifier = "croc"
    def __init__(self, data, alpha=7):
        """Constructs a CROC curve from the given dataset.
        The dataset must contain ``(x, y)`` pairs where `x` is a predicted
        value and `y` defines whether the example is positive or negative.
        When `y` is less than or equal to zero, it is considered a negative
        example, otherwise it is positive. ``False`` also means a negative
        and ``True`` also means a positive example. The dataset can also
        be an instance of `BinaryClassifierData`.
        `alpha` is the magnification factor that defines how much do we want
        to focus on the left side of the X axis. The default `alpha`=7
        transforms a FPR of 0.1 to 0.5.
        """
        self._transformation = ExponentialTransformation(alpha)
        super(CROCCurve, self).__init__(
            data, self._transformed_fpr, BinaryConfusionMatrix.tpr
        )
    def auc(self):
        """Constructs the area under the CROC curve by the average of the
        transformed FPRs at thresholds equal to each positive instance."""
        pos_ranks = self.data.get_positive_ranks()
        return self.auc_from_pos_ranks(pos_ranks, len(self.data))
    def auc_from_pos_ranks(self, pos_ranks, total):
        """Returns the AUC under a CROC curve, given the ranks of the positive
        examples and the total number of examples.
        This method can be used to calculate an AUC value quickly without
        constructing the curve itself if you have the positive ranks.
        """
        pos_count = len(pos_ranks)
        neg_count = float(total - pos_count)
        # With no negative examples the FPR is always zero, so the curve is
        # degenerate and the AUC is defined to be 1.
        if neg_count == 0.0:
            return 1.0
        trans = self._transformation
        # FPR at the threshold of each positive example, using its rank among
        # all examples (i negatives precede positive number i+1 at rank `rank`).
        fprs = [1.0 - (rank - i - 1) / neg_count for i, rank in enumerate(pos_ranks)]
        # NOTE(review): assumes the transformation maps a list elementwise --
        # confirm against ExponentialTransformation's implementation.
        return 1.0 - sum(trans(fprs)) / pos_count
    @axis_label("Transformed false positive rate")
    def _transformed_fpr(self, matrix):
        """Internal function that returns the transformed FPR value from the
        given confusion matrix that should be plotted on the X axis."""
        return self._transformation(matrix.fpr())
    def get_empty_figure(self, *args, **kwds):
        """Returns an empty `matplotlib.Figure` that can be used
        to show the ROC curve. The arguments of this function are
        passed on intact to the constructor of `matplotlib.Figure`,
        except these (which are interpreted here):
            - `title`: the title of the figure.
            - `xlabel`: the label of the X axis.
            - `ylabel`: the label of the Y axis.
            - `no_discrimination_curve`: if ``True``, the no discrimination
              curve will be drawn. If ``False``, it won't be drawn. If
              a string, it is interpreted as a line style by
              ``matplotlib`` and this line style will be used to draw
              the no discrimination line. If it is a tuple, the first
              element of the tuple will be interpreted as the color
              and the second will be interpreted as the line style
              by ``matplotlib``.
        These must be given as keyword arguments.
        """
        if "no_discrimination_curve" in kwds:
            no_discrimination_curve = kwds["no_discrimination_curve"]
            del kwds["no_discrimination_curve"]
        else:
            no_discrimination_curve = ("#444444", ":")
        # Create the figure by calling the superclass
        fig = super(CROCCurve, self).get_empty_figure(*args, **kwds)
        axes = fig.get_axes()[0]
        # Plot the no-discrimination curve. On a CROC plot the diagonal of a
        # plain ROC plot becomes a curve, so sample it in 1% steps.
        if no_discrimination_curve:
            ys = [y / 100.0 for y in range(101)]
            xs = [self._transformation(y) for y in ys]
            if isinstance(no_discrimination_curve, (tuple, list)):
                color, linestyle = no_discrimination_curve
                axes.plot(xs, ys, color=color, linestyle=linestyle)
            else:
                axes.plot(xs, ys, no_discrimination_curve)
        return fig
    def get_interpolated_point(self, x):
        """Returns an interpolated point on this curve at the given
        X position.
        This method performs the proper non-linear interpolation that
        is required for concentrated ROC curves. Basically, for each
        point, we find the two nearest known points, transform the
        X coordinates back to obtain the original FPRs, interpolate
        them, then transform then again.
        It is assumed that `self._points` is sorted in ascending order.
        If not, this function will produce wrong results.
        """
        points = self.points
        pos = bisect(points, (x, 0))
        # Do we have an exact match?
        try:
            if points[pos][0] == x:
                return points[pos]
        except IndexError:
            pass
        # Nope, so we have to interpolate
        if pos == 0:
            # Extrapolation is not possible, just return the
            # first element from points
            return points[0]
        elif pos == len(points):
            # Extrapolation is not possible, just return the
            # last element from points
            return points[-1]
        # Truly interpolating. (A second, duplicated pos == 0 /
        # pos == len(points) check used to live here; it was unreachable
        # after the early returns above and has been removed.)
        (x1, y1), (x2, y2) = points[pos - 1 : pos + 1]
        # Interpolate linearly in untransformed FPR space, then report the
        # result at the (already transformed) X coordinate.
        trans_inv = self._transformation.inverse
        fpr1, fpr2, fpr_mid = trans_inv(x1), trans_inv(x2), trans_inv(x)
        r = (fpr2 - fpr_mid) / (fpr2 - fpr1)
        return (x, y1 * r + y2 * (1 - r))
    @classmethod
    def get_friendly_name(cls):
        """Returns a human-readable name of the curve that can be
        used in messages."""
        return "concentrated ROC curve"
class FScoreCurve(BinaryClassifierPerformanceCurve):
    """Class representing an F-score curve.
    An F-score curve plots the F-score on the Y axis versus the fraction
    of data classified as positive on the X axis.
    """
    identifier = "fscore"
    def __init__(self, data, f=1.0):
        """Constructs an F-score curve from the given dataset.
        The dataset must contain ``(x, y)`` pairs where `x` is a predicted
        value and `y` defines whether the example is positive or negative.
        When `y` is less than or equal to zero, it is considered a negative
        example, otherwise it is positive. ``False`` also means a negative
        and ``True`` also means a positive example. The dataset can also
        be an instance of `BinaryClassifierData`.
        The value of `f` controls the weighting between precision and recall
        in the F-score formula. `f` = 1 means that equal importance is attached
        to precision and recall. In general, recall is considered `f` times more
        important than precision.
        """
        # The closure below captures `f` so the curve's Y metric is the
        # F-score with the weighting chosen at construction time.
        @axis_label("F-score")
        def f_score(matrix):
            """Internal function that binds the `f` parameter of
            `BinaryConfusionMatrix.f_score` to the value specified in the constructor.
            """
            return BinaryConfusionMatrix.f_score(matrix, f)
        super(FScoreCurve, self).__init__(data, BinaryConfusionMatrix.fdp, f_score)
    @classmethod
    def get_friendly_name(cls):
        """Returns a human-readable name of the curve that can be
        used in messages."""
        return "F-score curve"
| mit |
FreeSchoolHackers/data_hacking | dga_detection/dga_model_gen.py | 6 | 13951 |
''' Build models to detect Algorithmically Generated Domain Names (DGA).
We're trying to classify domains as being 'legit' or having a high probability
of being generated by a DGA (Dynamic Generation Algorithm). We have 'legit' in
quotes as we're using the domains in Alexa as the 'legit' set.
'''
import os, sys
import traceback
import json
import optparse
import pickle
import collections
import sklearn
import sklearn.feature_extraction
import sklearn.ensemble
import sklearn.metrics
import pandas as pd
import numpy as np
import tldextract
import math
# Version printing is always a good idea
# NOTE(review): Python 2 print statements -- this script predates Python 3.
print 'Scikit Learn version: %s' % sklearn.__version__
print 'Pandas version: %s' % pd.__version__
print 'TLDExtract version: %s' % tldextract.__version__
# Version 0.12.0 of Pandas has a DeprecationWarning about Height blah that I'm ignoring
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Okay for this model we need the 2LD and nothing else
def domain_extract(uri):
    """Return the second-level domain (2LD) of *uri*.
    Returns ``None`` when tldextract finds no valid public suffix
    (e.g. for bare hostnames or malformed input).
    """
    parts = tldextract.extract(uri)
    return parts.domain if parts.suffix else None
# Entropy calc (this must match model_eval)
def entropy(s):
p, lns = collections.Counter(s), float(len(s))
return -sum( count/lns * math.log(count/lns, 2) for count in p.values())
def show_cm(cm, labels):
    """Print per-class percentage stats for a confusion matrix.
    `cm` is a square numpy array where cm[i][j] counts examples of true
    class i predicted as class j; `labels` names the classes in order.
    """
    # Normalise each row by its total so entry (i, j) becomes the percentage
    # of class i examples predicted as class j. keepdims=True keeps the row
    # sums as a column vector so the division broadcasts row-wise (this
    # replaces the old np.matrix(...).T workaround).
    percent = (cm * 100.0) / cm.sum(axis=1, keepdims=True)
    print('Confusion Matrix Stats')
    for i, label_i in enumerate(labels):
        for j, label_j in enumerate(labels):
            print("%s/%s: %.2f%% (%d/%d)" % (label_i, label_j, (percent[i][j]), cm[i][j], cm[i].sum()))
def save_model_to_disk(name, model, model_dir='models'):
    ''' Serialize and save a model to disk.
    Pickles *model* (highest protocol) and writes it to
    <model_dir>/<name>.model. The directory must already exist.
    '''
    # First serialized the model
    serialized_model = pickle.dumps(model, protocol=pickle.HIGHEST_PROTOCOL)
    # Model directory + model name
    model_path = os.path.join(model_dir, name+'.model')
    # Now store it to disk
    print('Storing Serialized Model to Disk (%s:%.2fMeg)' % (name, len(serialized_model)/1024.0/1024.0))
    # Use a context manager so the file handle is always closed (the old
    # open(...).write(...) form leaked the handle on some interpreters).
    with open(model_path, 'wb') as model_file:
        model_file.write(serialized_model)
def load_model_from_disk(name, model_dir='models'):
    ''' Load and unpickle a model previously written by save_model_to_disk.
    Returns the model object, or None (after printing a message) when the
    file is missing or cannot be unpickled.
    '''
    # Model directory is relative to this file
    model_path = os.path.join(model_dir, name+'.model')
    # Put a try/except around the model load in case it fails.
    # Catch only I/O and unpickling failures instead of a bare except,
    # so real bugs (e.g. NameError) are not silently swallowed.
    try:
        with open(model_path, 'rb') as model_file:
            model = pickle.loads(model_file.read())
    except (IOError, OSError, pickle.UnpicklingError):
        print('Could not load model: %s from directory %s!' % (name, model_path))
        return None
    return model
def main():
    ''' Main method, takes care of loading data, running it through the various analyses
        and reporting the results.
        Pipeline: load Alexa ('legit') and DGA domain lists, engineer
        length/entropy/ngram features, train a RandomForest classifier and
        serialize the model plus its vectorizers to disk.
        NOTE(review): Python 2 script; several APIs used below were removed
        in later library versions (see inline notes).
    '''
    # Handle command-line arguments
    parser = optparse.OptionParser()
    parser.add_option('--alexa-file', default='data/alexa_100k.csv', help='Alexa file to pull from. Default: %default')
    (options, arguments) = parser.parse_args()
    print options, arguments
    try: # Pokemon exception handling
        # This is the Alexa 1M domain list.
        print 'Loading alexa dataframe...'
        alexa_dataframe = pd.read_csv(options.alexa_file, names=['rank','uri'], header=None, encoding='utf-8')
        print alexa_dataframe.info()
        print alexa_dataframe.head()
        # Compute the 2LD of the domain given by Alexa
        alexa_dataframe['domain'] = [ domain_extract(uri) for uri in alexa_dataframe['uri']]
        del alexa_dataframe['rank']
        del alexa_dataframe['uri']
        alexa_dataframe = alexa_dataframe.dropna()
        alexa_dataframe = alexa_dataframe.drop_duplicates()
        print alexa_dataframe.head()
        # Set the class
        alexa_dataframe['class'] = 'legit'
        # Shuffle the data (important for training/testing)
        alexa_dataframe = alexa_dataframe.reindex(np.random.permutation(alexa_dataframe.index))
        alexa_total = alexa_dataframe.shape[0]
        print 'Total Alexa domains %d' % alexa_total
        # Read in the DGA domains
        dga_dataframe = pd.read_csv('data/dga_domains.txt', names=['raw_domain'], header=None, encoding='utf-8')
        # We noticed that the blacklist values just differ by capitalization or .com/.org/.info
        dga_dataframe['domain'] = dga_dataframe.applymap(lambda x: x.split('.')[0].strip().lower())
        del dga_dataframe['raw_domain']
        # It's possible we have NaNs from blanklines or whatever
        dga_dataframe = dga_dataframe.dropna()
        dga_dataframe = dga_dataframe.drop_duplicates()
        dga_total = dga_dataframe.shape[0]
        print 'Total DGA domains %d' % dga_total
        # Set the class
        dga_dataframe['class'] = 'dga'
        print 'Number of DGA domains: %d' % dga_dataframe.shape[0]
        print dga_dataframe.head()
        # Concatenate the domains in a big pile!
        all_domains = pd.concat([alexa_dataframe, dga_dataframe], ignore_index=True)
        # Add a length field for the domain
        all_domains['length'] = [len(x) for x in all_domains['domain']]
        # Okay since we're trying to detect dynamically generated domains and short
        # domains (length <=6) are crazy random even for 'legit' domains we're going
        # to punt on short domains (perhaps just white/black list for short domains?)
        all_domains = all_domains[all_domains['length'] > 6]
        # Add a entropy field for the domain
        all_domains['entropy'] = [entropy(x) for x in all_domains['domain']]
        print all_domains.head()
        # Now we compute NGrams for every Alexa domain and see if we can use the
        # NGrams to help us better differentiate and mark DGA domains...
        # Scikit learn has a nice NGram generator that can generate either char NGrams or word NGrams (we're using char).
        # Parameters:
        # - ngram_range=(3,5) # Give me all ngrams of length 3, 4, and 5
        # - min_df=1e-4 # Minimumum document frequency. At 1e-4 we're saying give us NGrams that
        # # happen in at least .1% of the domains (so for 100k... at least 100 domains)
        alexa_vc = sklearn.feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(3,5), min_df=1e-4, max_df=1.0)
        # I'm SURE there's a better way to store all the counts but not sure...
        # At least the min_df parameters has already done some thresholding
        counts_matrix = alexa_vc.fit_transform(alexa_dataframe['domain'])
        # log10 of the per-ngram totals: damps the huge counts of common ngrams.
        alexa_counts = np.log10(counts_matrix.sum(axis=0).getA1())
        ngrams_list = alexa_vc.get_feature_names()
        # For fun sort it and show it
        import operator
        _sorted_ngrams = sorted(zip(ngrams_list, alexa_counts), key=operator.itemgetter(1), reverse=True)
        print 'Alexa NGrams: %d' % len(_sorted_ngrams)
        for ngram, count in _sorted_ngrams[:10]:
            print ngram, count
        # We're also going to throw in a bunch of dictionary words
        word_dataframe = pd.read_csv('data/words.txt', names=['word'], header=None, dtype={'word': np.str}, encoding='utf-8')
        # Cleanup words from dictionary
        word_dataframe = word_dataframe[word_dataframe['word'].map(lambda x: str(x).isalpha())]
        word_dataframe = word_dataframe.applymap(lambda x: str(x).strip().lower())
        word_dataframe = word_dataframe.dropna()
        word_dataframe = word_dataframe.drop_duplicates()
        print word_dataframe.head(10)
        # Now compute NGrams on the dictionary words
        # Same logic as above...
        dict_vc = sklearn.feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(3,5), min_df=1e-5, max_df=1.0)
        counts_matrix = dict_vc.fit_transform(word_dataframe['word'])
        dict_counts = np.log10(counts_matrix.sum(axis=0).getA1())
        ngrams_list = dict_vc.get_feature_names()
        # For fun sort it and show it
        import operator
        _sorted_ngrams = sorted(zip(ngrams_list, dict_counts), key=operator.itemgetter(1), reverse=True)
        print 'Word NGrams: %d' % len(_sorted_ngrams)
        for ngram, count in _sorted_ngrams[:10]:
            print ngram, count
        # We use the transform method of the CountVectorizer to form a vector
        # of ngrams contained in the domain, that vector is than multiplied
        # by the counts vector (which is a column sum of the count matrix).
        def ngram_count(domain):
            alexa_match = alexa_counts * alexa_vc.transform([domain]).T # Woot vector multiply and transpose Woo Hoo!
            dict_match = dict_counts * dict_vc.transform([domain]).T
            print '%s Alexa match:%d Dict match: %d' % (domain, alexa_match, dict_match)
        # Examples:
        ngram_count('google')
        ngram_count('facebook')
        ngram_count('1cb8a5f36f')
        ngram_count('pterodactylfarts')
        ngram_count('ptes9dro-dwacty2lfa5rrts')
        ngram_count('beyonce')
        ngram_count('bey666on4ce')
        # Compute NGram matches for all the domains and add to our dataframe
        all_domains['alexa_grams']= alexa_counts * alexa_vc.transform(all_domains['domain']).T
        all_domains['word_grams']= dict_counts * dict_vc.transform(all_domains['domain']).T
        print all_domains.head()
        # Use the vectorized operations of the dataframe to investigate differences
        # between the alexa and word grams
        all_domains['diff'] = all_domains['alexa_grams'] - all_domains['word_grams']
        # NOTE(review): DataFrame.sort was removed in later pandas (now
        # sort_values) -- fine for the pandas 0.12-era this script targets.
        # The table below shows those domain names that are more 'dictionary' and less 'web'
        print all_domains.sort(['diff'], ascending=True).head(10)
        # The table below shows those domain names that are more 'web' and less 'dictionary'
        # Good O' web....
        print all_domains.sort(['diff'], ascending=False).head(50)
        # Lets look at which Legit domains are scoring low on both alexa and word gram count
        weird_cond = (all_domains['class']=='legit') & (all_domains['word_grams']<3) & (all_domains['alexa_grams']<2)
        weird = all_domains[weird_cond]
        print weird.shape[0]
        print weird.head(10)
        # Epiphany... Alexa really may not be the best 'exemplar' set...
        # (probably a no-shit moment for everyone else :)
        #
        # Discussion: If you're using these as exemplars of NOT DGA, then you're probably
        # making things very hard on your machine learning algorithm.
        # Perhaps we should have two categories of Alexa domains, 'legit'
        # and a 'weird'. based on some definition of weird.
        # Looking at the entries above... we have approx 80 domains
        # that we're going to mark as 'weird'.
        #
        all_domains.loc[weird_cond, 'class'] = 'weird'
        print all_domains['class'].value_counts()
        # NOTE(review): result of the next expression is discarded -- a no-op
        # outside an interactive notebook.
        all_domains[all_domains['class'] == 'weird'].head()
        # Perhaps we will just exclude the weird class from our ML training
        not_weird = all_domains[all_domains['class'] != 'weird']
        # NOTE(review): as_matrix was removed in later pandas (now .values /
        # .to_numpy()).
        X = not_weird.as_matrix(['length', 'entropy', 'alexa_grams', 'word_grams'])
        # Labels (scikit learn uses 'y' for classification labels)
        y = np.array(not_weird['class'].tolist())
        # Random Forest is a popular ensemble machine learning classifier.
        # http://scikit-learn.org/dev/modules/generated/sklearn.ensemble.RandomForestClassifier.html
        # NOTE(review): compute_importances was removed from scikit-learn
        # (importances are always computed now).
        clf = sklearn.ensemble.RandomForestClassifier(n_estimators=20, compute_importances=True) # Trees in the forest
        # Train on a 80/20 split
        # NOTE(review): sklearn.cross_validation became sklearn.model_selection
        # in scikit-learn 0.18+.
        from sklearn.cross_validation import train_test_split
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        # Now plot the results of the holdout set in a confusion matrix
        labels = ['legit', 'dga']
        cm = sklearn.metrics.confusion_matrix(y_test, y_pred, labels)
        show_cm(cm, labels)
        # We can also look at what features the learning algorithm thought were the most important
        importances = zip(['length', 'entropy', 'alexa_grams', 'word_grams'], clf.feature_importances_)
        print importances
        # Now train on the whole thing before doing tests and saving models to disk
        clf.fit(X, y)
        # test_it shows how to do evaluation, also fun for manual testing below :)
        def test_it(domain):
            _alexa_match = alexa_counts * alexa_vc.transform([domain]).T # Woot matrix multiply and transpose Woo Hoo!
            _dict_match = dict_counts * dict_vc.transform([domain]).T
            _X = [len(domain), entropy(domain), _alexa_match, _dict_match]
            print '%s : %s' % (domain, clf.predict(_X)[0])
        # Examples (feel free to change these and see the results!)
        test_it('google')
        test_it('google88')
        test_it('facebook')
        test_it('1cb8a5f36f')
        test_it('pterodactylfarts')
        test_it('ptes9dro-dwacty2lfa5rrts')
        test_it('beyonce')
        test_it('bey666on4ce')
        test_it('supersexy')
        test_it('yourmomissohotinthesummertime')
        test_it('35-sdf-09jq43r')
        test_it('clicksecurity')
        # Serialize model to disk
        save_model_to_disk('dga_model_random_forest', clf)
        save_model_to_disk('dga_model_alexa_vectorizor', alexa_vc)
        save_model_to_disk('dga_model_alexa_counts', alexa_counts)
        save_model_to_disk('dga_model_dict_vectorizor', dict_vc)
        save_model_to_disk('dga_model_dict_counts', dict_counts)
    except KeyboardInterrupt:
        print 'Goodbye Cruel World...'
        sys.exit(0)
    except Exception, error:
        traceback.print_exc()
        print '(Exception):, %s' % (str(error))
        sys.exit(1)
# Script entry point: run the training pipeline only when executed
# directly, not when imported as a module.
if __name__ == '__main__':
    main()
VipinRathor/zeppelin | spark/interpreter/src/main/resources/python/zeppelin_pyspark.py | 5 | 2768 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from py4j.java_gateway import java_import
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
# for back compatibility
from pyspark.sql import SQLContext, Row
# NOTE(review): `gateway` is not defined in this file -- presumably injected
# into the script's namespace by the embedding Zeppelin interpreter; verify
# against the Java-side interpreter code.
intp = gateway.entry_point
if intp.isSpark3():
    # Silence pyspark.util warnings emitted under Spark 3.
    warnings.filterwarnings(action='ignore', module='pyspark.util')
jsc = intp.getJavaSparkContext()
# Make the Spark JVM classes reachable through the py4j gateway.
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
java_import(gateway.jvm, "scala.Tuple2")
# Wrap the JVM-side SparkConf / SparkContext in their Python counterparts.
jconf = intp.getSparkConf()
conf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)
sc = _zsc_ = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
if not intp.isSpark1():
    from pyspark.sql import SparkSession
    spark = __zSpark__ = SparkSession(sc, intp.getSparkSession())
    sqlc = __zSqlc__ = __zSpark__._wrapped
else:
    # Spark 1.x has no SparkSession; fall back to a plain SQLContext.
    sqlc = __zSqlc__ = SQLContext(sparkContext=sc, sqlContext=intp.getSQLContext())
sqlContext = __zSqlc__
from zeppelin_context import PyZeppelinContext
#TODO(zjffdu) merge it with IPySparkZeppelinContext
class PySparkZeppelinContext(PyZeppelinContext):
    """Zeppelin context that knows how to render PySpark DataFrames."""
    def __init__(self, z, gateway):
        super(PySparkZeppelinContext, self).__init__(z, gateway)
    def show(self, obj, **kwargs):
        """Display *obj*; Spark DataFrames are rendered via Zeppelin's
        table display (showData on the underlying Java DataFrame)."""
        from pyspark.sql import DataFrame
        if isinstance(obj, DataFrame):
            print(self.z.showData(obj._jdf))
        else:
            super(PySparkZeppelinContext, self).show(obj, **kwargs)
z = __zeppelin__ = PySparkZeppelinContext(intp.getZeppelinContext(), gateway)
__zeppelin__._setup_matplotlib()
# add jars to path
import sys
jarlist = map(lambda url: url.replace("file:/", "/"), (conf.get("spark.jars") or "").split(","))
sys.path.extend(filter(lambda jar: jar not in sys.path, jarlist))
| apache-2.0 |
hmendozap/auto-sklearn | test/test_pipeline/components/feature_preprocessing/test_nystroem_sampler.py | 1 | 4484 | import unittest
import numpy as np
import sklearn.preprocessing
from autosklearn.pipeline.components.feature_preprocessing.nystroem_sampler import \
Nystroem
from autosklearn.pipeline.util import _test_preprocessing, get_dataset
class NystroemComponentTest(unittest.TestCase):
    """Unit tests for the auto-sklearn Nystroem feature-preprocessing
    component."""
    def test_default_configuration(self):
        # With the default configuration the transform keeps the number of
        # rows and produces 100 output features, and is not all-zero.
        transformation, original = _test_preprocessing(Nystroem)
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], 100)
        self.assertFalse((transformation == 0).all())
        # Custom preprocessing test to check if clipping to zero works
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
        original_X_train = X_train.copy()
        ss = sklearn.preprocessing.StandardScaler()
        X_train = ss.fit_transform(X_train)
        configuration_space = Nystroem.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = Nystroem(random_state=1,
                                **{hp_name: default[hp_name]
                                   for hp_name in default
                                   if default[hp_name] is not None})
        transformer = preprocessor.fit(X_train, Y_train)
        transformation, original = transformer.transform(
            X_train), original_X_train
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], 100)
    #@unittest.skip("Right now, the RBFSampler returns a float64 array!")
    # NOTE(review): deliberately disabled -- the leading underscore keeps the
    # unittest runner from collecting it (see the skip comment above).
    def _test_preprocessing_dtype(self):
        # Dense
        # np.float32
        X_train, Y_train, X_test, Y_test = get_dataset("iris")
        self.assertEqual(X_train.dtype, np.float32)
        configuration_space = Nystroem.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = Nystroem(random_state=1,
                                **{hp.hyperparameter.name: hp.value
                                   for hp
                                   in
                                   default.values.values()})
        preprocessor.fit(X_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, np.float32)
        # np.float64
        X_train, Y_train, X_test, Y_test = get_dataset("iris")
        X_train = X_train.astype(np.float64)
        configuration_space = Nystroem.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = Nystroem(random_state=1,
                                **{hp.hyperparameter.name: hp.value
                                   for hp
                                   in
                                   default.values.values()})
        preprocessor.fit(X_train, Y_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, np.float64)
        # Sparse
        # np.float32
        X_train, Y_train, X_test, Y_test = get_dataset("iris", make_sparse=True)
        self.assertEqual(X_train.dtype, np.float32)
        configuration_space = Nystroem.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = Nystroem(random_state=1,
                                **{hp.hyperparameter.name: hp.value
                                   for hp
                                   in
                                   default.values.values()})
        preprocessor.fit(X_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, np.float32)
        # np.float64
        X_train, Y_train, X_test, Y_test = get_dataset("iris", make_sparse=True)
        X_train = X_train.astype(np.float64)
        configuration_space = Nystroem.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = Nystroem(random_state=1,
                                **{hp.hyperparameter.name: hp.value
                                   for hp
                                   in
                                   default.values.values()})
        preprocessor.fit(X_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, np.float64)
| bsd-3-clause |
akionakamura/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
 # avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
    """
    Custom SVM kernel: a weighted linear kernel that doubles the
    contribution of the first feature.

                 (2  0)
    k(x, y) = x  (    )  y.T
                 (0  1)
    """
    weight = np.array([[2, 0], [0, 1.0]])
    return x.dot(weight).dot(y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the class of every mesh point (flattened into an (n, 2) array).
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
          ' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Solid-Mechanics/matplotlib-4-abaqus | matplotlib/animation.py | 4 | 41616 | # TODO:
# * Loop Delay is broken on GTKAgg. This is because source_remove() is not
# working as we want. PyGTK bug?
# * Documentation -- this will need a new section of the User's Guide.
# Both for Animations and just timers.
# - Also need to update http://www.scipy.org/Cookbook/Matplotlib/Animations
# * Blit
# * Currently broken with Qt4 for widgets that don't start on screen
# * Still a few edge cases that aren't working correctly
# * Can this integrate better with existing matplotlib animation artist flag?
# - If animated removes from default draw(), perhaps we could use this to
# simplify initial draw.
# * Example
# * Frameless animation - pure procedural with no loop
# * Need example that uses something like inotify or subprocess
# * Complex syncing examples
# * Movies
# * Can blit be enabled for movies?
# * Need to consider event sources to allow clicking through multiple figures
import sys
import itertools
import contextlib
from matplotlib.cbook import iterable, is_string_like
from matplotlib.compat import subprocess
from matplotlib import verbose
from matplotlib import rcParams
# Other potential writing methods:
# * http://pymedia.org/
# * libmng (produces swf) python wrappers: https://github.com/libming/libming
# * Wrap x264 API:
# (http://stackoverflow.com/questions/2940671/
# how-to-encode-series-of-images-into-h264-using-x264-api-c-c )
# A registry for available MovieWriter classes
class MovieWriterRegistry(object):
    """Registry of the MovieWriter classes usable on this system."""

    def __init__(self):
        # Maps a writer name (e.g. 'ffmpeg') to its MovieWriter class.
        self.avail = dict()

    def register(self, name):
        """Return a class decorator that registers the decorated writer
        class under *name* -- but only when the class reports itself as
        available on this system. As in:

            @register('foo')
            class Foo:
                pass
        """
        def decorator(writer_cls):
            if writer_cls.isAvailable():
                self.avail[name] = writer_cls
            return writer_cls
        return decorator

    def list(self):
        '''Get a list of available MovieWriters.'''
        return self.avail.keys()

    def is_available(self, name):
        """True when a writer named *name* has been registered."""
        return name in self.avail

    def __getitem__(self, name):
        """Look up a registered writer class by name."""
        if not self.avail:
            raise RuntimeError("No MovieWriters available!")
        return self.avail[name]
writers = MovieWriterRegistry()
class MovieWriter(object):
    '''
    Base class for writing movies. Fundamentally, what a MovieWriter does
    is provide a way to grab frames by calling grab_frame(). setup()
    is called to start the process and finish() is called afterwards.
    This class is set up to provide for writing movie frame data to a pipe.
    saving() is provided as a context manager to facilitate this process as::
        with moviewriter.saving('myfile.mp4'):
            # Iterate over frames
            moviewriter.grab_frame()
    The use of the context manager ensures that setup and cleanup are
    performed as necessary.
    frame_format: string
        The format used in writing frame data, defaults to 'rgba'
    '''
    def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None,
                 metadata=None):
        '''
        Construct a new MovieWriter object.
        fps: int
            Framerate for movie.
        codec: string or None, optional
            The codec to use. If None (the default) the setting in the
            rcParam `animation.codec` is used.
        bitrate: int or None, optional
            The bitrate for the saved movie file, which is one way to control
            the output file size and quality. The default value is None,
            which uses the value stored in the rcParam `animation.bitrate`.
            A value of -1 implies that the bitrate should be determined
            automatically by the underlying utility.
        extra_args: list of strings or None
            A list of extra string arguments to be passed to the underlying
            movie utility. The default is None, which passes the additional
            arguments in the 'animation.extra_args' rcParam.
        metadata: dict of string:string or None
            A dictionary of keys and values for metadata to include in the
            output file. Some keys that may be of use include:
            title, artist, genre, subject, copyright, srcform, comment.
        '''
        self.fps = fps
        # Pipe-based writers stream raw RGBA pixel data by default.
        self.frame_format = 'rgba'
        if codec is None:
            self.codec = rcParams['animation.codec']
        else:
            self.codec = codec
        if bitrate is None:
            self.bitrate = rcParams['animation.bitrate']
        else:
            self.bitrate = bitrate
        if extra_args is None:
            self.extra_args = list(rcParams[self.args_key])
        else:
            self.extra_args = extra_args
        if metadata is None:
            self.metadata = dict()
        else:
            self.metadata = metadata
    @property
    def frame_size(self):
        'A tuple (width,height) in pixels of a movie frame.'
        width_inches, height_inches = self.fig.get_size_inches()
        return width_inches * self.dpi, height_inches * self.dpi
    def setup(self, fig, outfile, dpi, *args):
        '''
        Perform setup for writing the movie file.
        fig: `matplotlib.Figure` instance
            The figure object that contains the information for frames
        outfile: string
            The filename of the resulting movie file
        dpi: int
            The DPI (or resolution) for the file. This controls the size
            in pixels of the resulting movie file.
        '''
        self.outfile = outfile
        self.fig = fig
        self.dpi = dpi
        # Run here so that grab_frame() can write the data to a pipe. This
        # eliminates the need for temp files.
        self._run()
    @contextlib.contextmanager
    def saving(self, *args):
        '''
        Context manager to facilitate writing the movie file.
        ``*args`` are any parameters that should be passed to `setup`.
        '''
        # This particular sequence is what contextlib.contextmanager wants
        self.setup(*args)
        yield
        self.finish()
    def _run(self):
        # Uses subprocess to call the program for assembling frames into a
        # movie file.  *args* returns the sequence of command line arguments
        # from a few configuration options.
        command = self._args()
        if verbose.ge('debug'):
            output = sys.stdout
        else:
            output = subprocess.PIPE
        verbose.report('MovieWriter.run: running command: %s' %
                       ' '.join(command))
        self._proc = subprocess.Popen(command, shell=False,
                                      stdout=output, stderr=output,
                                      stdin=subprocess.PIPE)
    def finish(self):
        'Finish any processing for writing the movie.'
        self.cleanup()
    def grab_frame(self, **savefig_kwargs):
        '''
        Grab the image information from the figure and save as a movie frame.
        All keyword arguments in savefig_kwargs are passed on to the 'savefig'
        command that saves the figure.
        '''
        verbose.report('MovieWriter.grab_frame: Grabbing frame.',
                       level='debug')
        try:
            # Tell the figure to save its data to the sink, using the
            # frame format and dpi.
            self.fig.savefig(self._frame_sink(), format=self.frame_format,
                             dpi=self.dpi, **savefig_kwargs)
        except RuntimeError:
            out, err = self._proc.communicate()
            verbose.report('MovieWriter -- Error '
                           'running proc:\n%s\n%s' % (out,
                                                      err), level='helpful')
            raise
    def _frame_sink(self):
        'Returns the place to which frames should be written.'
        return self._proc.stdin
    def _args(self):
        'Assemble list of utility-specific command-line arguments.'
        # BUG FIX: this previously *returned* the exception instance
        # instead of raising it, which silently handed callers a bogus
        # "argument list".  Raise so that a subclass forgetting to
        # override _args() fails loudly.
        raise NotImplementedError("args needs to be implemented by subclass.")
    def cleanup(self):
        'Clean-up and collect the process used to write the movie file.'
        out, err = self._proc.communicate()
        verbose.report('MovieWriter -- '
                       'Command stdout:\n%s' % out, level='debug')
        verbose.report('MovieWriter -- '
                       'Command stderr:\n%s' % err, level='debug')
    @classmethod
    def bin_path(cls):
        '''
        Returns the binary path to the commandline tool used by a specific
        subclass. This is a class method so that the tool can be looked for
        before making a particular MovieWriter subclass available.
        '''
        return rcParams[cls.exec_key]
    @classmethod
    def isAvailable(cls):
        '''
        Check to see if a MovieWriter subclass is actually available by
        running the commandline tool.
        '''
        try:
            subprocess.Popen(cls.bin_path(),
                             shell=False,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
            return True
        except OSError:
            return False
class FileMovieWriter(MovieWriter):
    '`MovieWriter` subclass that handles writing to a file.'
    def __init__(self, *args, **kwargs):
        MovieWriter.__init__(self, *args, **kwargs)
        # File-based writers save individual frame images, so the frame
        # format comes from its own rcParam rather than raw 'rgba'.
        self.frame_format = rcParams['animation.frame_format']
    def setup(self, fig, outfile, dpi, frame_prefix='_tmp', clear_temp=True):
        '''
        Perform setup for writing the movie file.
        fig: `matplotlib.Figure` instance
            The figure object that contains the information for frames
        outfile: string
            The filename of the resulting movie file
        dpi: int
            The DPI (or resolution) for the file. This controls the size
            in pixels of the resulting movie file.
        frame_prefix: string, optional
            The filename prefix to use for the temporary files. Defaults
            to '_tmp'
        clear_temp: bool
            Specifies whether the temporary files should be deleted after
            the movie is written. (Useful for debugging.) Defaults to True.
        '''
        self.fig = fig
        self.outfile = outfile
        self.dpi = dpi
        self.clear_temp = clear_temp
        self.temp_prefix = frame_prefix
        self._frame_counter = 0  # used for generating sequential file names
        self._temp_names = list()
        # Template producing names like '<prefix>0000000.<frame_format>'.
        self.fname_format_str = '%s%%07d.%s'
    @property
    def frame_format(self):
        '''
        Format (png, jpeg, etc.) to use for saving the frames, which can be
        decided by the individual subclasses.
        '''
        return self._frame_format
    @frame_format.setter
    def frame_format(self, frame_format):
        # Silently fall back to the first supported format when handed an
        # unsupported one, rather than raising.
        if frame_format in self.supported_formats:
            self._frame_format = frame_format
        else:
            self._frame_format = self.supported_formats[0]
    def _base_temp_name(self):
        # Generates a template name (without number) given the frame format
        # for extension and the prefix.
        return self.fname_format_str % (self.temp_prefix, self.frame_format)
    def _frame_sink(self):
        # Creates a filename for saving using the basename and the current
        # counter.
        fname = self._base_temp_name() % self._frame_counter
        # Save the filename so we can delete it later if necessary
        self._temp_names.append(fname)
        verbose.report(
            'FileMovieWriter.frame_sink: saving frame %d to fname=%s' %
            (self._frame_counter, fname),
            level='debug')
        self._frame_counter += 1  # Ensures each created name is 'unique'
        # This file returned here will be closed once it's used by savefig()
        # because it will no longer be referenced and will be gc-ed.
        return open(fname, 'wb')
    def finish(self):
        # Call run here now that all frame grabbing is done. All temp files
        # are available to be assembled.
        self._run()
        MovieWriter.finish(self)  # Will call clean-up
        # Check error code for creating file here, since we just run
        # the process here, rather than having an open pipe.
        if self._proc.returncode:
            raise RuntimeError('Error creating movie, return code: '
                               + str(self._proc.returncode)
                               + ' Try running with --verbose-debug')
    def cleanup(self):
        MovieWriter.cleanup(self)
        #Delete temporary files
        if self.clear_temp:
            import os
            verbose.report(
                'MovieWriter: clearing temporary fnames=%s' %
                str(self._temp_names),
                level='debug')
            for fname in self._temp_names:
                os.remove(fname)
# Base class of ffmpeg information. Has the config keys and the common set
# of arguments that controls the *output* side of things.
class FFMpegBase:
    """Mixin providing ffmpeg-specific rcParam keys and output arguments."""
    exec_key = 'animation.ffmpeg_path'
    args_key = 'animation.ffmpeg_args'
    @property
    def output_args(self):
        """Command-line arguments controlling ffmpeg's *output* side."""
        out = ['-vcodec', self.codec]
        # ffmpeg understands a 'k' suffix as kbps, which is the unit the
        # bitrate setting is expressed in.
        if self.bitrate > 0:
            out += ['-b', '%dk' % self.bitrate]
        if self.extra_args:
            out += list(self.extra_args)
        for key, value in self.metadata.items():
            out += ['-metadata', '%s=%s' % (key, value)]
        # -y: overwrite the output file without prompting.
        out += ['-y', self.outfile]
        return out
# Combine FFMpeg options with pipe-based writing
@writers.register('ffmpeg')
class FFMpegWriter(MovieWriter, FFMpegBase):
    '''Pipe-based ffmpeg writer: raw frames are streamed to ffmpeg stdin.'''
    def _args(self):
        # Returns the command line parameters for subprocess to use
        # ffmpeg to create a movie using a pipe.
        args = [self.bin_path(), '-f', 'rawvideo', '-vcodec', 'rawvideo',
                '-s', '%dx%d' % self.frame_size, '-pix_fmt', self.frame_format,
                '-r', str(self.fps)]
        # Logging is quieted because subprocess.PIPE has limited buffer size.
        if not verbose.ge('debug'):
            args += ['-loglevel', 'quiet']
        args += ['-i', 'pipe:'] + self.output_args
        return args
#Combine FFMpeg options with temp file-based writing
@writers.register('ffmpeg_file')
class FFMpegFileWriter(FileMovieWriter, FFMpegBase):
    '''Temp-file-based ffmpeg writer: frames go to disk, then ffmpeg
    assembles them into a movie in finish().'''
    supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
                         'pbm', 'raw', 'rgba']
    def _args(self):
        # Returns the command line parameters for subprocess to use
        # ffmpeg to create a movie using a collection of temp images
        return [self.bin_path(), '-vframes', str(self._frame_counter),
                '-r', str(self.fps), '-i',
                self._base_temp_name()] + self.output_args
# Base class of avconv information. AVConv has identical arguments to
# FFMpeg
class AVConvBase(FFMpegBase):
    '''Mixin for avconv: identical CLI to ffmpeg, different rcParam keys.'''
    exec_key = 'animation.avconv_path'
    args_key = 'animation.avconv_args'
# Combine AVConv options with pipe-based writing
@writers.register('avconv')
class AVConvWriter(AVConvBase, FFMpegWriter):
    '''Pipe-based avconv writer; all behavior comes from FFMpegWriter.'''
    pass
# Combine AVConv options with file-based writing
@writers.register('avconv_file')
class AVConvFileWriter(AVConvBase, FFMpegFileWriter):
    '''Temp-file-based avconv writer; behavior from FFMpegFileWriter.'''
    pass
# Base class of mencoder information. Contains configuration key information
# as well as arguments for controlling *output*
class MencoderBase:
    """Mixin with mencoder rcParam keys and output-side arguments."""
    exec_key = 'animation.mencoder_path'
    args_key = 'animation.mencoder_args'
    # Mencoder rejects unknown -info keys, so only these may be passed on.
    allowed_metadata = ['name', 'artist', 'genre', 'subject', 'copyright',
                        'srcform', 'comment']
    def _remap_metadata(self):
        """Copy the ffmpeg-style 'title' key into mencoder's 'name' key."""
        meta = self.metadata
        if 'title' in meta:
            meta['name'] = meta['title']
    @property
    def output_args(self):
        """Command-line arguments controlling mencoder's *output* side."""
        self._remap_metadata()
        out = ['-o', self.outfile, '-ovc', 'lavc', '-lavcopts',
               'vcodec=%s' % self.codec]
        if self.bitrate > 0:
            out.append('vbitrate=%d' % self.bitrate)
        if self.extra_args:
            out.extend(self.extra_args)
        if self.metadata:
            # Only keys mencoder accepts are joined into the -info string.
            info = ':'.join('%s=%s' % (key, val)
                            for key, val in self.metadata.items()
                            if key in self.allowed_metadata)
            out.extend(['-info', info])
        return out
# Combine Mencoder options with pipe-based writing
@writers.register('mencoder')
class MencoderWriter(MovieWriter, MencoderBase):
    '''Pipe-based mencoder writer: raw frames streamed via stdin ('-').'''
    def _args(self):
        # Returns the command line parameters for subprocess to use
        # mencoder to create a movie
        return [self.bin_path(), '-', '-demuxer', 'rawvideo', '-rawvideo',
                ('w=%i:h=%i:' % self.frame_size +
                 'fps=%i:format=%s' % (self.fps,
                                       self.frame_format))] + self.output_args
# Combine Mencoder options with temp file-based writing
@writers.register('mencoder_file')
class MencoderFileWriter(FileMovieWriter, MencoderBase):
    '''Temp-file-based mencoder writer using its mf:// image demuxer.'''
    supported_formats = ['png', 'jpeg', 'tga', 'sgi']
    def _args(self):
        # Returns the command line parameters for subprocess to use
        # mencoder to create a movie
        return [self.bin_path(),
                'mf://%s*.%s' % (self.temp_prefix, self.frame_format),
                '-frames', str(self._frame_counter), '-mf',
                'type=%s:fps=%d' % (self.frame_format,
                                    self.fps)] + self.output_args
# Base class for animated GIFs with convert utility
class ImageMagickBase:
    """Mixin holding ImageMagick ``convert`` rcParam keys and output args."""
    exec_key = 'animation.convert_path'
    args_key = 'animation.convert_args'
    @property
    def delay(self):
        """Frame delay for convert, expressed in ticks of 1/100 second."""
        return 100. / self.fps
    @property
    def output_args(self):
        """convert only needs the destination filename on the output side."""
        return [self.outfile]
@writers.register('imagemagick')
class ImageMagickWriter(MovieWriter, ImageMagickBase):
    '''Pipe-based animated-GIF writer using ImageMagick's ``convert``.'''
    def _args(self):
        # convert reads raw frames of the given size/format from stdin
        # ('<format>:-') and loops the resulting animation forever.
        return ([self.bin_path(),
                 '-size', '%ix%i' % self.frame_size, '-depth', '8',
                 '-delay', str(self.delay), '-loop', '0',
                 '%s:-' % self.frame_format]
                + self.output_args)
@writers.register('imagemagick_file')
class ImageMagickFileWriter(FileMovieWriter, ImageMagickBase):
    '''Temp-file-based animated-GIF writer: convert runs on a file glob.'''
    supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
                         'pbm', 'raw', 'rgba']
    def _args(self):
        # The '<prefix>*.<fmt>' glob is expanded by convert itself.
        return ([self.bin_path(), '-delay', str(self.delay), '-loop', '0',
                 '%s*.%s' % (self.temp_prefix, self.frame_format)]
                + self.output_args)
class Animation(object):
    '''
    This class wraps the creation of an animation using matplotlib. It is
    only a base class which should be subclassed to provide needed behavior.
    *fig* is the figure object that is used to get draw, resize, and any
    other needed events.
    *event_source* is a class that can run a callback when desired events
    are generated, as well as be stopped and started. Examples include timers
    (see :class:`TimedAnimation`) and file system notifications.
    *blit* is a boolean that controls whether blitting is used to optimize
    drawing.
    '''
    def __init__(self, fig, event_source=None, blit=False):
        self._fig = fig
        # Disables blitting for backends that don't support it. This
        # allows users to request it if available, but still have a
        # fallback that works if it is not.
        self._blit = blit and fig.canvas.supports_blit
        # These are the basics of the animation. The frame sequence represents
        # information for each frame of the animation and depends on how the
        # drawing is handled by the subclasses. The event source fires events
        # that cause the frame sequence to be iterated.
        self.frame_seq = self.new_frame_seq()
        self.event_source = event_source
        # Clear the initial frame
        self._init_draw()
        # Instead of starting the event source now, we connect to the figure's
        # draw_event, so that we only start once the figure has been drawn.
        self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)
        # Connect to the figure's close_event so that we don't continue to
        # fire events and try to draw to a deleted figure.
        self._close_id = self._fig.canvas.mpl_connect('close_event',
                                                      self._stop)
        if self._blit:
            self._setup_blit()
    def _start(self, *args):
        '''
        Starts interactive animation. Adds the draw frame command to the GUI
        handler, calls show to start the event loop.
        '''
        # On start, we add our callback for stepping the animation and
        # actually start the event_source. We also disconnect _start
        # from the draw_events
        self.event_source.add_callback(self._step)
        self.event_source.start()
        self._fig.canvas.mpl_disconnect(self._first_draw_id)
        self._first_draw_id = None  # So we can check on save
    def _stop(self, *args):
        # On stop we disconnect all of our events.
        if self._blit:
            self._fig.canvas.mpl_disconnect(self._resize_id)
        self._fig.canvas.mpl_disconnect(self._close_id)
        self.event_source.remove_callback(self._step)
        self.event_source = None
    def save(self, filename, writer=None, fps=None, dpi=None, codec=None,
             bitrate=None, extra_args=None, metadata=None, extra_anim=None,
             savefig_kwargs=None):
        '''
        Saves a movie file by drawing every frame.
        *filename* is the output filename, e.g., :file:`mymovie.mp4`
        *writer* is either an instance of :class:`MovieWriter` or a string
        key that identifies a class to use, such as 'ffmpeg' or 'mencoder'.
        If nothing is passed, the value of the rcparam `animation.writer` is
        used.
        *fps* is the frames per second in the movie. Defaults to None,
        which will use the animation's specified interval to set the frames
        per second.
        *dpi* controls the dots per inch for the movie frames. This combined
        with the figure's size in inches controls the size of the movie.
        *codec* is the video codec to be used. Not all codecs are supported
        by a given :class:`MovieWriter`. If none is given, this defaults to the
        value specified by the rcparam `animation.codec`.
        *bitrate* specifies the amount of bits used per second in the
        compressed movie, in kilobits per second. A higher number means a
        higher quality movie, but at the cost of increased file size. If no
        value is given, this defaults to the value given by the rcparam
        `animation.bitrate`.
        *extra_args* is a list of extra string arguments to be passed to the
        underlying movie utility. The default is None, which passes the
        additional arguments in the 'animation.extra_args' rcParam.
        *metadata* is a dictionary of keys and values for metadata to include
        in the output file. Some keys that may be of use include:
        title, artist, genre, subject, copyright, srcform, comment.
        *extra_anim* is a list of additional `Animation` objects that should
        be included in the saved movie file. These need to be from the same
        `matplotlib.Figure` instance. Also, animation frames will just be
        simply combined, so there should be a 1:1 correspondence between
        the frames from the different animations.
        *savefig_kwargs* is a dictionary containing keyword arguments to be
        passed on to the 'savefig' command which is called repeatedly to save
        the individual frames. This can be used to set tight bounding boxes,
        for example.
        '''
        if savefig_kwargs is None:
            savefig_kwargs = {}
        # FIXME: Using 'bbox_inches' doesn't currently work with
        # writers that pipe the data to the command because this
        # requires a fixed frame size (see Ryan May's reply in this
        # thread: [1]). Thus we drop the 'bbox_inches' argument if it
        # exists in savefig_kwargs.
        #
        # [1] (http://matplotlib.1069221.n5.nabble.com/
        # Animation-class-let-save-accept-kwargs-which-
        # are-passed-on-to-savefig-td39627.html)
        #
        if 'bbox_inches' in savefig_kwargs:
            if not (writer in ['ffmpeg_file', 'mencoder_file'] or
                    isinstance(writer,
                               (FFMpegFileWriter, MencoderFileWriter))):
                print("Warning: discarding the 'bbox_inches' argument in "
                      "'savefig_kwargs' as it is only currently supported "
                      "with the writers 'ffmpeg_file' and 'mencoder_file' "
                      "(writer used: "
                      "'{}').".format(writer if isinstance(writer, str)
                                      else writer.__class__.__name__))
                savefig_kwargs.pop('bbox_inches')
        # Need to disconnect the first draw callback, since we'll be doing
        # draws. Otherwise, we'll end up starting the animation.
        if self._first_draw_id is not None:
            self._fig.canvas.mpl_disconnect(self._first_draw_id)
            reconnect_first_draw = True
        else:
            reconnect_first_draw = False
        if fps is None and hasattr(self, '_interval'):
            # Convert interval in ms to frames per second
            fps = 1000. / self._interval
        # If the writer is None, use the rc param to find the name of the one
        # to use
        if writer is None:
            writer = rcParams['animation.writer']
        # Re-use the savefig DPI for ours if none is given
        if dpi is None:
            dpi = rcParams['savefig.dpi']
        if codec is None:
            codec = rcParams['animation.codec']
        if bitrate is None:
            bitrate = rcParams['animation.bitrate']
        # Animations sharing this figure are interleaved frame-by-frame;
        # ones drawing to other figures are silently dropped.
        all_anim = [self]
        if not extra_anim is None:
            all_anim.extend(anim
                            for anim
                            in extra_anim if anim._fig is self._fig)
        # If we have the name of a writer, instantiate an instance of the
        # registered class.
        if is_string_like(writer):
            if writer in writers.avail:
                writer = writers[writer](fps, codec, bitrate,
                                         extra_args=extra_args,
                                         metadata=metadata)
            else:
                import warnings
                warnings.warn("MovieWriter %s unavailable" % writer)
                try:
                    writer = writers.list()[0]
                except IndexError:
                    raise ValueError("Cannot save animation: no writers are "
                                     "available. Please install mencoder or "
                                     "ffmpeg to save animations.")
        verbose.report('Animation.save using %s' % type(writer),
                       level='helpful')
        # Create a new sequence of frames for saved data. This is different
        # from new_frame_seq() to give the ability to save 'live' generated
        # frame information to be saved later.
        # TODO: Right now, after closing the figure, saving a movie won't work
        # since GUI widgets are gone. Either need to remove extra code to
        # allow for this non-existent use case or find a way to make it work.
        with writer.saving(self._fig, filename, dpi):
            # NOTE: itertools.izip is Python 2 only; this module targets
            # Python 2 (it uses matplotlib.compat imports above).
            for data in itertools.izip(*[a.new_saved_frame_seq()
                                         for a in all_anim]):
                for anim, d in zip(all_anim, data):
                    #TODO: Need to see if turning off blit is really necessary
                    anim._draw_next_frame(d, blit=False)
                writer.grab_frame(**savefig_kwargs)
        # Reconnect signal for first draw if necessary
        if reconnect_first_draw:
            self._first_draw_id = self._fig.canvas.mpl_connect('draw_event',
                                                               self._start)
    def _step(self, *args):
        '''
        Handler for getting events. By default, gets the next frame in the
        sequence and hands the data off to be drawn.
        '''
        # Returns True to indicate that the event source should continue to
        # call _step, until the frame sequence reaches the end of iteration,
        # at which point False will be returned.
        try:
            framedata = next(self.frame_seq)
            self._draw_next_frame(framedata, self._blit)
            return True
        except StopIteration:
            return False
    def new_frame_seq(self):
        'Creates a new sequence of frame information.'
        # Default implementation is just an iterator over self._framedata
        return iter(self._framedata)
    def new_saved_frame_seq(self):
        'Creates a new sequence of saved/cached frame information.'
        # Default is the same as the regular frame sequence
        return self.new_frame_seq()
    def _draw_next_frame(self, framedata, blit):
        # Breaks down the drawing of the next frame into steps of pre- and
        # post- draw, as well as the drawing of the frame itself.
        self._pre_draw(framedata, blit)
        self._draw_frame(framedata)
        self._post_draw(framedata, blit)
    def _init_draw(self):
        # Initial draw to clear the frame. Also used by the blitting code
        # when a clean base is required.
        pass
    def _pre_draw(self, framedata, blit):
        # Perform any cleaning or whatnot before the drawing of the frame.
        # This default implementation allows blit to clear the frame.
        if blit:
            self._blit_clear(self._drawn_artists, self._blit_cache)
    def _draw_frame(self, framedata):
        # Performs actual drawing of the frame.
        raise NotImplementedError('Needs to be implemented by subclasses to'
                                  ' actually make an animation.')
    def _post_draw(self, framedata, blit):
        # After the frame is rendered, this handles the actual flushing of
        # the draw, which can be a direct draw_idle() or make use of the
        # blitting.
        if blit and self._drawn_artists:
            self._blit_draw(self._drawn_artists, self._blit_cache)
        else:
            self._fig.canvas.draw_idle()
    # The rest of the code in this class is to facilitate easy blitting
    def _blit_draw(self, artists, bg_cache):
        # Handles blitted drawing, which renders only the artists given instead
        # of the entire figure.
        updated_ax = []
        for a in artists:
            # If we haven't cached the background for this axes object, do
            # so now. This might not always be reliable, but it's an attempt
            # to automate the process.
            if a.axes not in bg_cache:
                bg_cache[a.axes] = a.figure.canvas.copy_from_bbox(a.axes.bbox)
            a.axes.draw_artist(a)
            updated_ax.append(a.axes)
        # After rendering all the needed artists, blit each axes individually.
        for ax in set(updated_ax):
            ax.figure.canvas.blit(ax.bbox)
    def _blit_clear(self, artists, bg_cache):
        # Get a list of the axes that need clearing from the artists that
        # have been drawn. Grab the appropriate saved background from the
        # cache and restore.
        axes = set(a.axes for a in artists)
        for a in axes:
            a.figure.canvas.restore_region(bg_cache[a])
    def _setup_blit(self):
        # Setting up the blit requires: a cache of the background for the
        # axes
        self._blit_cache = dict()
        self._drawn_artists = []
        # Resizes invalidate the cached backgrounds, so they are rebuilt
        # via the _handle_resize/_end_redraw pair below.
        self._resize_id = self._fig.canvas.mpl_connect('resize_event',
                                                       self._handle_resize)
        self._post_draw(None, self._blit)
    def _handle_resize(self, *args):
        # On resize, we need to disable the resize event handling so we don't
        # get too many events. Also stop the animation events, so that
        # we're paused. Reset the cache and re-init. Set up an event handler
        # to catch once the draw has actually taken place.
        self._fig.canvas.mpl_disconnect(self._resize_id)
        self.event_source.stop()
        self._blit_cache.clear()
        self._init_draw()
        self._resize_id = self._fig.canvas.mpl_connect('draw_event',
                                                       self._end_redraw)
    def _end_redraw(self, evt):
        # Now that the redraw has happened, do the post draw flushing and
        # blit handling. Then re-enable all of the original events.
        self._post_draw(None, self._blit)
        self.event_source.start()
        self._fig.canvas.mpl_disconnect(self._resize_id)
        self._resize_id = self._fig.canvas.mpl_connect('resize_event',
                                                       self._handle_resize)
class TimedAnimation(Animation):
    '''
    :class:`Animation` subclass that supports time-based animation, drawing
    a new frame every *interval* milliseconds.
    *repeat* controls whether the animation should repeat when the sequence
    of frames is completed.
    *repeat_delay* optionally adds a delay in milliseconds before repeating
    the animation.
    '''
    def __init__(self, fig, interval=200, repeat_delay=None, repeat=True,
                 event_source=None, *args, **kwargs):
        # Store the timing information
        self._interval = interval
        self._repeat_delay = repeat_delay
        self.repeat = repeat
        # If we're not given an event source, create a new timer. This permits
        # sharing timers between animation objects for syncing animations.
        if event_source is None:
            event_source = fig.canvas.new_timer()
            event_source.interval = self._interval
        Animation.__init__(self, fig, event_source=event_source,
                           *args, **kwargs)
    def _step(self, *args):
        '''
        Handler for getting events.
        '''
        # Extends the _step() method for the Animation class. If
        # Animation._step signals that it reached the end and we want to
        # repeat, we refresh the frame sequence and return True. If
        # _repeat_delay is set, change the event_source's interval to our loop
        # delay and set the callback to one which will then set the interval
        # back.
        still_going = Animation._step(self, *args)
        if not still_going and self.repeat:
            self.frame_seq = self.new_frame_seq()
            if self._repeat_delay:
                self.event_source.remove_callback(self._step)
                self.event_source.add_callback(self._loop_delay)
                self.event_source.interval = self._repeat_delay
                return True
            else:
                # No delay requested: immediately draw the first frame of
                # the refreshed sequence.
                return Animation._step(self, *args)
        else:
            return still_going
    def _stop(self, *args):
        # If we stop in the middle of a loop delay (which is relatively likely
        # given the potential pause here, remove the loop_delay callback as
        # well.
        self.event_source.remove_callback(self._loop_delay)
        Animation._stop(self)
    def _loop_delay(self, *args):
        # Reset the interval and change callbacks after the delay.
        self.event_source.remove_callback(self._loop_delay)
        self.event_source.interval = self._interval
        self.event_source.add_callback(self._step)
        Animation._step(self)
class ArtistAnimation(TimedAnimation):
    '''
    Before calling this function, all plotting should have taken place
    and the relevant artists saved.
    frame_info is a list, with each list entry a collection of artists that
    represent what needs to be enabled on each frame. These will be disabled
    for other frames.
    '''
    def __init__(self, fig, artists, *args, **kwargs):
        # Internal list of artists drawn in the most recent frame.
        self._drawn_artists = []
        # Use the list of artists as the framedata, which will be iterated
        # over by the machinery.
        self._framedata = artists
        TimedAnimation.__init__(self, fig, *args, **kwargs)
    def _init_draw(self):
        # Make all the artists involved in *any* frame invisible
        axes = []
        for f in self.new_frame_seq():
            for artist in f:
                artist.set_visible(False)
                # Assemble a list of unique axes that need flushing
                if artist.axes not in axes:
                    axes.append(artist.axes)
        # Flush the needed axes
        for ax in axes:
            ax.figure.canvas.draw()
    def _pre_draw(self, framedata, blit):
        '''
        Clears artists from the last frame.
        '''
        if blit:
            # Let blit handle clearing
            self._blit_clear(self._drawn_artists, self._blit_cache)
        else:
            # Otherwise, make all the artists from the previous frame invisible
            for artist in self._drawn_artists:
                artist.set_visible(False)
    def _draw_frame(self, artists):
        # Save the artists that were passed in as framedata for the other
        # steps (esp. blitting) to use.
        self._drawn_artists = artists
        # Make all the artists from the current frame visible
        for artist in artists:
            artist.set_visible(True)
class FuncAnimation(TimedAnimation):
    '''
    Makes an animation by repeatedly calling a function *func*, passing in
    (optional) arguments in *fargs*.
    *frames* can be a generator, an iterable, or a number of frames.
    *init_func* is a function used to draw a clear frame. If not given, the
    results of drawing from the first item in the frames sequence will be
    used. This function will be called once before the first frame.
    If blit=True, *func* and *init_func* should return an iterable of
    drawables to clear.
    '''
    def __init__(self, fig, func, frames=None, init_func=None, fargs=None,
                 save_count=None, **kwargs):
        if fargs:
            self._args = fargs
        else:
            self._args = ()
        self._func = func
        # Amount of framedata to keep around for saving movies. This is only
        # used if we don't know how many frames there will be: in the case
        # of no generator or in the case of a callable.
        self.save_count = save_count
        # Set up a function that creates a new iterable when needed. If nothing
        # is passed in for frames, just use itertools.count, which will just
        # keep counting from 0. A callable passed in for frames is assumed to
        # be a generator. An iterable will be used as is, and anything else
        # will be treated as a number of frames.
        if frames is None:
            self._iter_gen = itertools.count
        elif callable(frames):
            self._iter_gen = frames
        elif iterable(frames):
            self._iter_gen = lambda: iter(frames)
            # NOTE(review): len() here assumes a sized iterable; an
            # unsized iterable (e.g. a bare iterator) would raise here.
            self.save_count = len(frames)
        else:
            self._iter_gen = lambda: iter(range(frames))
            self.save_count = frames
        # If we're passed in and using the default, set it to 100.
        if self.save_count is None:
            self.save_count = 100
        self._init_func = init_func
        # Needs to be initialized so the draw functions work without checking
        self._save_seq = []
        TimedAnimation.__init__(self, fig, **kwargs)
        # Need to reset the saved seq, since right now it will contain data
        # for a single frame from init, which is not what we want.
        self._save_seq = []
    def new_frame_seq(self):
        # Use the generating function to generate a new frame sequence
        return self._iter_gen()
    def new_saved_frame_seq(self):
        # Generate an iterator for the sequence of saved data. If there are
        # no saved frames, generate a new frame sequence and take the first
        # save_count entries in it.
        if self._save_seq:
            return iter(self._save_seq)
        else:
            return itertools.islice(self.new_frame_seq(), self.save_count)
    def _init_draw(self):
        # Initialize the drawing either using the given init_func or by
        # calling the draw function with the first item of the frame sequence.
        # For blitting, the init_func should return a sequence of modified
        # artists.
        if self._init_func is None:
            self._draw_frame(next(self.new_frame_seq()))
        else:
            self._drawn_artists = self._init_func()
    def _draw_frame(self, framedata):
        # Save the data for potential saving of movies.
        self._save_seq.append(framedata)
        # Make sure to respect save_count (keep only the last save_count
        # around)
        self._save_seq = self._save_seq[-self.save_count:]
        # Call the func with framedata and args. If blitting is desired,
        # func needs to return a sequence of any artists that were modified.
        self._drawn_artists = self._func(framedata, *self._args)
| mit |
glouppe/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 157 | 2409 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
    # block to be able to use a multi-core grid search that also works under
    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
    # The multiprocessing module is used as the backend of joblib.Parallel
    # that is used when n_jobs != 1 in GridSearchCV
    # the training data folder must be passed as first argument
    movie_reviews_data_folder = sys.argv[1]
    dataset = load_files(movie_reviews_data_folder, shuffle=False)
    print("n_samples: %d" % len(dataset.data))
    # split the dataset in training and test set:
    docs_train, docs_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=None)
    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent
    # TASK: Build a grid search to find out whether unigrams or bigrams are
    # more useful.
    # Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for the each parameters set
    # explored by the grid search
    # TASK: Predict the outcome on the testing set and store it in a variable
    # named y_predicted
    # NOTE(review): this file is an exercise skeleton -- ``y_predicted`` only
    # exists once the TASK above has been completed; until then the report
    # below raises a NameError by design.
    # Print the classification report
    print(metrics.classification_report(y_test, y_predicted,
                                        target_names=dataset.target_names))
    # Print and plot the confusion matrix
    cm = metrics.confusion_matrix(y_test, y_predicted)
    print(cm)
    # import matplotlib.pyplot as plt
    # plt.matshow(cm)
    # plt.show()
| bsd-3-clause |
SunPower/Carousel | examples/PVPower/pvpower/formulas/irradiance.py | 1 | 3682 | # -*- coding: utf-8 -*-
"""
This module contains formulas for calculating PV power.
"""
import pvlib
import pandas as pd
def f_linketurbidity(times, latitude, longitude):
    """
    Look up the Linke turbidity factor for each timestamp.

    :param times: timestamps convertible to a :class:`pandas.DatetimeIndex`
    :param latitude: latitude as a 0-d array / NumPy scalar [degrees]
    :param longitude: longitude as a 0-d array / NumPy scalar [degrees]
    :returns: Linke turbidity values with shape (1, NOBS)
    """
    times = pd.DatetimeIndex(times)
    # latitude and longitude must be scalar or else linke turbidity lookup fails
    latitude, longitude = latitude.item(), longitude.item()
    tl = pvlib.clearsky.lookup_linke_turbidity(times, latitude, longitude)
    # reshape so the uncertainty wrapper sees a single dependent variable
    return tl.values.reshape(1, -1)
def f_clearsky(solar_zenith, am_abs, tl, dni_extra, altitude):
    """
    Ineichen clear-sky irradiance.

    :returns: direct-normal, global-horizontal and diffuse-horizontal
        components, in that order
    """
    clearsky = pvlib.clearsky.ineichen(
        solar_zenith, am_abs, tl, dni_extra=dni_extra, altitude=altitude)
    return tuple(clearsky[component] for component in ('dni', 'ghi', 'dhi'))
def f_solpos(times, latitude, longitude):
    """
    Solar position for a site at the given times.

    :param times: Python :class:`datetime.datetime` objects
    :type times: list
    :param latitude: latitude [degrees]
    :type latitude: float
    :param longitude: longitude [degrees]
    :type longitude: float
    :returns: apparent zenith, azimuth
    """
    # pvlib accepts Python datetimes and converts to a pandas DatetimeIndex
    position = pvlib.solarposition.get_solarposition(times, latitude, longitude)
    # Unpack the desired DataFrame columns as plain arrays; the (2, NOBS)
    # return shape lets unc_wrapper see two dependent variables.
    zenith = position['apparent_zenith'].values
    azimuth = position['azimuth'].values
    return zenith, azimuth
def f_dni_extra(times):
    """Return extraterrestrial radiation for each timestamp [W/m**2]."""
    times = pd.DatetimeIndex(times)
    return pvlib.irradiance.get_extra_radiation(times)
def f_airmass(solar_zenith):
    """Return relative airmass for the given zenith angles, shape (1, NOBS)."""
    # resize output so uncertainty wrapper can determine observations
    return pvlib.atmosphere.get_relative_airmass(solar_zenith).reshape(1, -1)
def f_pressure(altitude):
    """Convert altitude to atmospheric pressure via pvlib's alt2pres."""
    return pvlib.atmosphere.alt2pres(altitude)
def f_am_abs(airmass, pressure):
    """Return pressure-corrected (absolute) airmass, shape (1, NOBS)."""
    # drop any singleton axes before the pvlib call, then restore the
    # (1, NOBS) shape expected by the uncertainty wrapper
    am = airmass.squeeze()
    return pvlib.atmosphere.get_absolute_airmass(am, pressure).reshape(1, -1)
def f_total_irrad(times, surface_tilt, surface_azimuth, solar_zenith,
                  solar_azimuth, dni, ghi, dhi, dni_extra, am_abs,
                  model='haydavies'):
    """
    Calculate total irradiance

    :param times: timestamps
    :param surface_tilt: panel tilt from horizontal [deg]
    :param surface_azimuth: panel azimuth from north [deg]
    :param solar_zenith: refracted solar zenith angle [deg]
    :param solar_azimuth: solar azimuth [deg]
    :param dni: direct normal irradiance [W/m**2]
    :param ghi: global horizonal irradiance [W/m**2]
    :param dhi: diffuse horizontal irradiance [W/m**2]
    :param dni_extra: extraterrestrial irradiance [W/m**2]
    :param am_abs: absolute airmass [dimensionless]
    :param model: irradiance model name, default is ``'haydavies'``
    :type model: str
    :return: global, direct and diffuse plane of array irradiance [W/m**2]
    """
    # drop singleton axes so all series align when building the DataFrame
    am_abs = am_abs.squeeze()
    # make a DataFrame for time series arguments
    df = pd.DataFrame(
        {'solar_zenith': solar_zenith, 'solar_azimuth': solar_azimuth,
         'dni': dni, 'ghi': ghi, 'dhi': dhi, 'dni_extra': dni_extra,
         'am_abs': am_abs},
        index=times
    )
    # calculate total irradiance using PVLIB
    total_irrad = pvlib.irradiance.get_total_irradiance(
        surface_tilt, surface_azimuth, df['solar_zenith'], df['solar_azimuth'],
        df['dni'], df['ghi'], df['dhi'], dni_extra=df['dni_extra'],
        airmass=df['am_abs'], model=model
    ).fillna(0.0)  # undefined components are reported as zero
    # convert to ndarrays
    poa_global = total_irrad['poa_global'].values
    poa_direct = total_irrad['poa_direct'].values
    poa_diffuse = total_irrad['poa_diffuse'].values
    return poa_global, poa_direct, poa_diffuse
| bsd-3-clause |
ville-k/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 23 | 5276 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
  """Tests feature_engineering_fn."""
  def testFeatureEngineeringFn(self):
    """The transformed features/labels replace the raw ones end to end."""
    def input_fn():
      return {
          "x": constant_op.constant([1.])
      }, {
          "y": constant_op.constant([11.])
      }
    def feature_engineering_fn(features, labels):
      _, _ = features, labels
      # discard the raw tensors and hand back renamed, rewritten ones
      return {
          "transformed_x": constant_op.constant([9.])
      }, {
          "transformed_y": constant_op.constant([99.])
      }
    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      # the model only sees the transformed feature key
      predictions = features["transformed_x"]
      loss = constant_op.constant([2.])
      update_global_step = variables.get_global_step().assign_add(1)
      return predictions, loss, update_global_step
    estimator = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator.fit(input_fn=input_fn, steps=1)
    prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
    # predictions = transformed_x (9)
    self.assertEqual(9., prediction)
    metrics = estimator.evaluate(
        input_fn=input_fn, steps=1,
        metrics={"label":
                 metric_spec.MetricSpec(lambda predictions, labels: labels)})
    # labels = transformed_y (99)
    self.assertEqual(99., metrics["label"])
  def testNoneFeatureEngineeringFn(self):
    """Estimators with and without a feature_engineering_fn differ."""
    def input_fn():
      return {
          "x": constant_op.constant([1.])
      }, {
          "y": constant_op.constant([11.])
      }
    def feature_engineering_fn(features, labels):
      _, _ = features, labels
      # same keys as input_fn but different values, so its effect is visible
      return {
          "x": constant_op.constant([9.])
      }, {
          "y": constant_op.constant([99.])
      }
    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      predictions = features["x"]
      loss = constant_op.constant([2.])
      update_global_step = variables.get_global_step().assign_add(1)
      return predictions, loss, update_global_step
    estimator_with_fe_fn = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
    estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
    estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
    # predictions = x
    prediction_with_fe_fn = next(
        estimator_with_fe_fn.predict(
            input_fn=input_fn, as_iterable=True))
    self.assertEqual(9., prediction_with_fe_fn)
    prediction_without_fe_fn = next(
        estimator_without_fe_fn.predict(
            input_fn=input_fn, as_iterable=True))
    self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
  """Custom optimizer tests."""
  def testIrisMomentum(self):
    """A DNNClassifier accepts a user-supplied optimizer factory."""
    random.seed(42)
    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=42)
    def custom_optimizer():
      # factory callable: a fresh MomentumOptimizer per graph construction
      return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
    classifier = learn.DNNClassifier(
        hidden_units=[10, 20, 10],
        feature_columns=learn.infer_real_valued_columns_from_input(x_train),
        n_classes=3,
        optimizer=custom_optimizer,
        config=learn.RunConfig(tf_random_seed=1))
    classifier.fit(x_train, y_train, steps=400)
    predictions = np.array(list(classifier.predict_classes(x_test)))
    score = accuracy_score(y_test, predictions)
    # loose bound: only checks training made meaningful progress
    self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
ZENGXH/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
    """Discrete sub-sampled Ricker (Mexican hat) wavelet.

    Evaluates ``A * (1 - t**2/width**2) * exp(-t**2 / (2*width**2))`` with
    ``t = x - center`` on ``resolution`` integer sample points, where
    ``A = 2 / (sqrt(3*width) * pi**(1/4))`` is the standard normalizing
    amplitude of the Ricker wavelet.
    """
    x = np.linspace(0, resolution - 1, resolution)
    # BUG FIX: the original wrote ``np.pi ** 1 / 4`` which, by operator
    # precedence, is pi/4 rather than the intended pi**(1/4) of the
    # Ricker-wavelet normalization constant.
    x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
         * (1 - ((x - center) ** 2 / width ** 2))
         * np.exp((-(x - center) ** 2) / (2 * width ** 2)))
    return x
def ricker_matrix(width, resolution, n_components):
    """Dictionary of Ricker (Mexican hat) wavelets.

    Returns an (n_components, resolution) array whose rows are unit-norm
    Ricker atoms with evenly spaced centers.
    """
    # ROBUSTNESS FIX: the module passes ``resolution / subsampling`` and
    # ``np.floor(...)`` here, which are floats under true division; modern
    # NumPy rejects float sizes, so coerce to int explicitly.
    n_components = int(n_components)
    centers = np.linspace(0, resolution - 1, n_components)
    D = np.empty((n_components, resolution))
    for i, center in enumerate(centers):
        D[i] = ricker_function(resolution, center, width)
    # normalize each atom to unit l2 norm
    D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
    return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
# NOTE(review): true division makes this a float under Python 3; confirm
# ricker_matrix coerces it (or that the example targets Python 2).
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
                        n_components=n_components)
# Richer dictionary: same total atom budget split across five widths
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=np.floor(n_components / 5))
                for w in (10, 50, 100, 500, 1000))]
# Generate a signal: piecewise constant, +3 on the first quarter, -1 after
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
                                         ('fixed width', 'multiple widths'))):
    pl.subplot(1, 2, subplot + 1)
    pl.title('Sparse coding against %s dictionary' % title)
    pl.plot(y, ls='dotted', label='Original signal')
    # Do a wavelet approximation
    for title, algo, alpha, n_nonzero in estimators:
        coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
                            transform_alpha=alpha, transform_algorithm=algo)
        x = coder.transform(y)
        density = len(np.flatnonzero(x))
        x = np.ravel(np.dot(x, D))
        squared_error = np.sum((y - x) ** 2)
        pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
                % (title, density, squared_error))
    # Soft thresholding debiasing
    coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
                        transform_alpha=20)
    x = coder.transform(y)
    _, idx = np.where(x != 0)
    # re-fit the selected (nonzero) atoms to y by least squares
    x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
    x = np.ravel(np.dot(x, D))
    squared_error = np.sum((y - x) ** 2)
    pl.plot(x,
            label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
            (len(idx), squared_error))
    pl.axis('tight')
    pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
jakobj/nest-simulator | pynest/examples/one_neuron.py | 14 | 3680 | # -*- coding: utf-8 -*-
#
# one_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
One neuron example
------------------
This script simulates a neuron driven by a constant external current
and records its membrane potential.
See Also
~~~~~~~~
:doc:`twoneurons`
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis and
# plotting. Additionally, we set the verbosity to suppress info
# messages and reset the kernel.
# Resetting the kernel allows you to execute the script several
# times in a Python shell without interferences from previous NEST
# simulations. Thus, without resetting the kernel the network status
# including connections between nodes, status of neurons, devices and
# intrinsic time clocks, is kept and influences the next simulations.
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# Second, the nodes (neurons and devices) are created using ``Create``.
# We store the returned handles in variables for later reference.
# The ``Create`` function also allows you to create multiple nodes
# e.g. ``nest.Create('iaf_psc_alpha',5)``
# Also default parameters of the model can be configured using ``Create``
# by including a list of parameter dictionaries
# e.g. `nest.Create("iaf_psc_alpha", params=[{'I_e':376.0}])`.
# In this example we will configure these parameters in an additional
# step, which is explained in the third section.
neuron = nest.Create("iaf_psc_alpha")
voltmeter = nest.Create("voltmeter")
###############################################################################
# Third, we set the external current of the neuron.
# NOTE(review): presumably the current is in pA (NEST convention) -- confirm
# against the iaf_psc_alpha model documentation.
neuron.I_e = 376.0
###############################################################################
# Fourth, the neuron is connected to the voltmeter. The command
# ``Connect`` has different variants. Plain ``Connect`` just takes the
# handles of pre- and postsynaptic nodes and uses the default values
# for weight and delay. Note that the connection direction for the voltmeter is
# reversed compared to the spike recorder, because it observes the
# neuron instead of receiving events from it. Thus, ``Connect``
# reflects the direction of signal flow in the simulation kernel
# rather than the physical process of inserting an electrode into the
# neuron. The latter semantics is presently not available in NEST.
nest.Connect(voltmeter, neuron)
###############################################################################
# Now we simulate the network using ``Simulate``, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
###############################################################################
# Finally, we plot the neuron's membrane potential as a function of
# time and display the plot using pyplot.
nest.voltage_trace.from_device(voltmeter)
plt.show()
| gpl-2.0 |
LiuVII/Self-driving-RC-car | script_multi.py | 1 | 5002 | import os, time
import argparse
import re
import pandas as pd
from datetime import datetime
import shutil
import csv
from collections import deque
def check_parameters():
if not os.path.exists(data_set_dir+args.record_set+"_log.csv"):
print "Corresponding log.csv file for %s does not exists" % \
(args.record_set)
exit(1)
# elif not os.path.exists(image_set_dir+args.record_set):
# print "%s data set does not exist" % (args.record_set)
# exit(1)
if not os.path.exists(stream_dir+args.record_stream):
print "%s stream path does not exist" % (args.record_set)
exit(1)
if os.path.exists(data_set_dir+args.output_set+"_log.csv"):
print "%s output path already exists" % (args.output_set)
exit(1)
def get_ctime(file_name, path):
    """Return *file_name* prefixed with the file's creation timestamp.

    The prefix has the form ``YYYYmmdd-HHMMSS-cc`` (centisecond precision),
    e.g. ``20200101-120000-00_IMG_1.bmp``.  *path* is expected to end with a
    separator, as the two are concatenated directly.
    """
    created = datetime.fromtimestamp(os.path.getctime(path + file_name))
    # %f yields 6 microsecond digits; trim the last 4 to keep 2.
    stamp = created.strftime('%Y%m%d-%H%M%S-%f')[:-4]
    return "%s_%s" % (stamp, file_name)
def process_data():
    """Align left/right camera frames with the recorded command log.

    Reads <record_set>_log.csv, groups consecutive rows sharing the same
    command into [start, end] index ranges, matches the logged image names
    against the sorted files in the left/right stream directories, copies
    each matched pair into the output set with a creation-time prefix
    (see get_ctime), and writes <output_set>_log.csv describing the pairs.

    Uses the module-level ``args``, ``data_set_dir``, ``file_path`` and
    ``output_path`` names set up in the ``__main__`` block.
    """
    def img_key(name):
        # canonical "IMG_<n>.bmp" basename used to match log entries to files
        return re.search(".*(IMG_[0-9]*\.bmp$)", name).group(1)

    in_csv_file = data_set_dir + args.record_set + "_log.csv"
    # BUG FIX: list.sort() sorts in place and returns None; the original
    # ``os.listdir(...).sort()`` left both listings as None and crashed on
    # the first ``left_lst[0]`` access below.
    left_lst = sorted(os.listdir(file_path[0]))
    right_lst = sorted(os.listdir(file_path[1]))
    df = pd.read_csv(in_csv_file, names=['img_left', 'img_right', 'command'])
    entries = []
    # Build [start, end] row-index ranges of consecutive equal commands,
    # starting from row 1 (row 0 is skipped as in the original recording).
    ind = 2
    st_ind = 1
    end_ind = -1
    ranges = []
    while ind < len(df.index):
        if df['command'].iloc[st_ind] == df['command'].iloc[ind]:
            end_ind = ind
        else:
            if end_ind >= 0:
                ranges.append([st_ind, end_ind])
            else:
                ranges.append([st_ind, st_ind])
            st_ind = ind
            end_ind = -1
        ind += 1
    if end_ind >= 0:
        ranges.append([st_ind, end_ind])
    else:
        ranges.append([st_ind, st_ind])
    print(ranges)
    for inds in ranges:
        # Skip stream files until the first image of this range.
        while left_lst and \
                img_key(df['img_left'].iloc[inds[0]]) != img_key(left_lst[0]):
            left_lst = left_lst[1:]
        while right_lst and \
                img_key(df['img_right'].iloc[inds[0]]) != img_key(right_lst[0]):
            right_lst = right_lst[1:]
        # Collect every stream image up to and including the last image of
        # the range.
        left_stk = []
        while left_lst and \
                img_key(df['img_left'].iloc[inds[1]]) != img_key(left_lst[0]):
            left_stk.append(left_lst[0])
            left_lst = left_lst[1:]
        left_stk.append(left_lst[0])
        left_lst = left_lst[1:]
        right_stk = []
        while right_lst and \
                img_key(df['img_right'].iloc[inds[1]]) != img_key(right_lst[0]):
            right_stk.append(right_lst[0])
            right_lst = right_lst[1:]
        right_stk.append(right_lst[0])
        right_lst = right_lst[1:]
        # Copy matched pairs into the output set with a timestamp prefix and
        # record them for the output log.
        while right_stk and left_stk:
            left_tmp = get_ctime(left_stk[0], file_path[0])
            right_tmp = get_ctime(right_stk[0], file_path[1])
            entries.append([left_tmp, right_tmp, df['command'].iloc[inds[0]]])
            shutil.copy(file_path[0] + left_stk[0], output_path[0] + left_tmp)
            shutil.copy(file_path[1] + right_stk[0], output_path[1] + right_tmp)
            left_stk = left_stk[1:]
            right_stk = right_stk[1:]
    print(len(entries), len(df.index))
    out_csv_file = data_set_dir + args.output_set + "_log.csv"
    with open(out_csv_file, 'w') as out_csv:
        writer = csv.writer(out_csv, delimiter=',')
        writer.writerow(['img_left', 'img_right', 'command'])
        for entry in entries:
            writer.writerow(entry)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Data Creation")
    parser.add_argument(
        "record_set",
        type=str,
        help="Name of recorded data set"
    )
    parser.add_argument(
        "record_stream",
        type=str,
        help="Name of recording stream path"
    )
    parser.add_argument(
        "output_set",
        type=str,
        help="Name of output data set"
    )
    args =parser.parse_args()
    # NOTE: the names below are read as module globals by check_parameters()
    # and process_data().
    image_set_dir = "./data_sets/"
    data_set_dir = "./model_data/"
    stream_dir = "./stream/"
    check_parameters()
    print "Start processing"
    print
    # create output folder
    os.mkdir(image_set_dir+args.output_set)
    os.mkdir(image_set_dir+args.output_set+"/left")
    os.mkdir(image_set_dir+args.output_set+"/right")
    file_path = [stream_dir+args.record_stream+"/left/",
        stream_dir+args.record_stream+"/right/"]
    output_path = [image_set_dir+args.output_set+"/left/",
        image_set_dir+args.output_set+"/right/"]
    process_data()
| mit |
moutai/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 67 | 9084 | import numpy as np
from sklearn.utils import check_array
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
# Shared deterministic fixture for all tests: 10 samples x 8 features.
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
    """components_ has shape (n_components, n_features) after fit."""
    n_components = 5
    dico = DictionaryLearning(n_components, random_state=0).fit(X)
    assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
    """Shape also holds when n_components > n_features (overcomplete)."""
    n_components = 12
    dico = DictionaryLearning(n_components, random_state=0).fit(X)
    assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
    """code @ components_ reconstructs X for omp and lasso_lars transforms."""
    n_components = 12
    dico = DictionaryLearning(n_components, transform_algorithm='omp',
                              transform_alpha=0.001, random_state=0)
    code = dico.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X)
    dico.set_params(transform_algorithm='lasso_lars')
    code = dico.transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
    # used to test lars here too, but there's no guarantee the number of
    # nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
    # regression test that parallel reconstruction works with n_jobs=-1
    n_components = 12
    dico = DictionaryLearning(n_components, transform_algorithm='omp',
                              transform_alpha=0.001, random_state=0, n_jobs=-1)
    code = dico.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X)
    dico.set_params(transform_algorithm='lasso_lars')
    code = dico.transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
    """lasso_cd must cope with read-only (memmapped) input arrays."""
    n_components = 12
    with TempMemmap(X) as X_read_only:
        dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
                                  transform_alpha=0.001, random_state=0, n_jobs=-1)
        code = dico.fit(X_read_only).transform(X_read_only)
        assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
    """transform_n_nonzero_coefs bounds the code density for lars and omp."""
    n_components = 4
    dico = DictionaryLearning(n_components, transform_algorithm='lars',
                              transform_n_nonzero_coefs=3, random_state=0)
    # X[np.newaxis, 1] selects sample 1 as a (1, n_features) 2-D row
    code = dico.fit(X).transform(X[np.newaxis, 1])
    assert_true(len(np.flatnonzero(code)) == 3)
    dico.set_params(transform_algorithm='omp')
    code = dico.transform(X[np.newaxis, 1])
    assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
    """An unrecognized fit_algorithm raises ValueError at fit time."""
    n_components = 5
    dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
    assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
    """split_sign doubles the code into positive and negative halves."""
    n_components = 5
    dico = DictionaryLearning(n_components, transform_algorithm='threshold',
                              random_state=0)
    code = dico.fit(X).transform(X)
    dico.split_sign = True
    split_code = dico.transform(X)
    # positive part minus negative part recovers the unsplit code
    assert_array_equal(split_code[:, :n_components] -
                       split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
    """dict_learning_online returns consistently shaped code and dictionary."""
    rng = np.random.RandomState(0)
    n_components = 8
    code, dictionary = dict_learning_online(X, n_components=n_components,
                                            alpha=1, random_state=rng)
    assert_equal(code.shape, (n_samples, n_components))
    assert_equal(dictionary.shape, (n_components, n_features))
    assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
    """verbose=1/2 runs complete without error (output is swallowed)."""
    n_components = 5
    # test verbosity
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    old_stdout = sys.stdout
    try:
        # redirect stdout so verbose output does not pollute the test log;
        # the finally clause restores it even if a fit fails
        sys.stdout = StringIO()
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
                                           random_state=0)
        dico.fit(X)
        dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
                                           random_state=0)
        dico.fit(X)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
                             random_state=0)
        dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
                             random_state=0)
    finally:
        sys.stdout = old_stdout
    assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
    """MiniBatchDictionaryLearning fit yields correctly shaped components_."""
    n_components = 5
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
    dico.fit(X)
    assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
    """Online variant also supports overcomplete dictionaries."""
    n_components = 12
    dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
                                       random_state=0).fit(X)
    assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
    """With n_iter=0 the dictionary stays exactly at dict_init."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)
    dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
                                       dict_init=V, random_state=0).fit(X)
    assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
    """Repeated partial_fit converges to the same result as one big fit."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features) # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
                                        batch_size=1,
                                        alpha=1, shuffle=False, dict_init=V,
                                        random_state=0).fit(X)
    dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
                                        n_iter=1, dict_init=V,
                                        random_state=0)
    # feed the same samples one at a time, 10 passes, mirroring dict1's
    # batch_size=1, shuffle=False schedule
    for i in range(10):
        for sample in X:
            dict2.partial_fit(sample[np.newaxis, :])
    assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
                           0))
    assert_array_almost_equal(dict1.components_, dict2.components_,
                              decimal=2)
def test_sparse_encode_shapes():
    """All sparse_encode algorithms return an (n_samples, n_components) code."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features) # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        code = sparse_encode(X, V, algorithm=algo)
        assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_input():
    """Fortran-ordered input must yield the same codes as C-ordered input."""
    n_components = 100
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features) # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    Xf = check_array(X, order='F')
    for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        a = sparse_encode(X, V, algorithm=algo)
        b = sparse_encode(Xf, V, algorithm=algo)
        assert_array_almost_equal(a, b)
def test_sparse_encode_error():
    """With tiny alpha the code is nonzero and reconstructs X closely."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features) # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    code = sparse_encode(X, V, alpha=0.001)
    assert_true(not np.all(code == 0))
    assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
    """omp with n_nonzero_coefs=None still returns a full-shape code."""
    rng = np.random.RandomState(0)
    X = rng.randn(100, 64)
    D = rng.randn(2, 64)
    code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
                                          n_nonzero_coefs=None)
    assert_equal(code.shape, (100, 2))
def test_unknown_method():
    """An unrecognized algorithm name raises ValueError."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features) # random init
    assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
    """SparseCoder with a fixed dictionary encodes X with small error."""
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features) # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
                       transform_alpha=0.001).transform(X)
    assert_true(not np.all(code == 0))
    assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
ClockworkOrigins/m2etis | configurator/quicktest/Reporting.py | 1 | 4530 | __author__ = 'amw'
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from configurator.persistence.PersistenceManager import PersistenceManager
import configurator.util.util as util
from scipy.interpolate import griddata
from configurator.util.util import sanitize_results
import yaml
def create_plot_matrix(datasets, type, numRows, numCols, title, labels):
    """Draw up to numRows*numCols datasets as a grid of 2d or 3d subplots.

    :param datasets: for "2d": [xs, ys, title]; for "3d": [xs, ys, zs, title]
    :param type: "2d" (line plot) or "3d" (interpolated surface + scatter)
    :param labels: axis labels, [x, y] for 2d or [x, y, z] for 3d
    """
    plt.close('all')
    fig = plt.figure()
    fig.suptitle(title)
    for i in range(numRows*numCols):
        if i >= len(datasets):
            break
        dataset = datasets[i]
        # BUG FIX: matplotlib subplot positions are 1-based; the original
        # passed ``i`` starting at 0, which is an invalid subplot index.
        if type == "2d":
            ax = plt.subplot(numRows, numCols, i + 1)
            ax.plot(dataset[0], dataset[1])
            ax.set_title(dataset[2])
            ax.set_xlabel(labels[0])
            ax.set_ylabel(labels[1])
        if type == "3d":
            #plt.gca().invert_xaxis()
            ax = plt.subplot(numRows, numCols, i + 1, projection='3d')
            ax.set_xlabel(labels[0])
            ax.set_ylabel(labels[1])
            ax.set_zlabel(labels[2])
            x = dataset[0]
            y = dataset[1]
            z = dataset[2]
            # interpolate the scattered samples onto a regular 100x100 grid
            # so a surface can be drawn through them
            xi = np.linspace(min(x), max(x), 100)
            yi = np.linspace(min(y), max(y), 100)
            zi = griddata((x, y), z, (xi[None, :], yi[:, None]))
            xig, yig = np.meshgrid(xi, yi)
            ax.plot_surface(xig, yig, zi)
            ax.scatter(x, y, z, color="red")
            ax.set_title(dataset[3])
    plt.show()
def _get_corresponding_results(results, x_param, y_param):
    """Extract aligned (x, y) value pairs from result documents, sorted by x.

    Each parameter is a "category.item" path into the result dicts; values
    are unit-stripped via util.strip_unit and converted to float.
    """
    resultList = [[] for i in range(0, 2)]
    for result in results:
        for position, parameter in enumerate([x_param, y_param]):
            category = parameter.split('.')[0]
            item = parameter.split('.')[1]
            resultList[position].append(float(util.strip_unit([result[category][item]])[0]))
    # sort by the x values while keeping (x, y) pairs aligned
    return zip(*sorted(zip(*resultList)))
def _get_corresponding_results_3d(results, x_param, y_param, z_param):
    """Extract aligned (x, y, z) value triples from result documents, sorted by x.

    Same extraction as _get_corresponding_results but for three
    "category.item" parameter paths.
    """
    resultList = [[] for i in range(0, 3)]
    for result in results:
        for position, parameter in enumerate([x_param, y_param, z_param]):
            category = parameter.split('.')[0]
            item = parameter.split('.')[1]
            resultList[position].append(float(util.strip_unit([result[category][item]])[0]))
    # sort by the x values while keeping (x, y, z) triples aligned
    return zip(*sorted(zip(*resultList)))
def create_dataset(persistence, type, tag, params, filter):
    """Query the result collection and build one plot dataset.

    :param type: "2d" -> [xs, ys, title]; "3d" -> [xs, ys, zs, title]
    :param params: list of "category.item" paths (2 or 3 entries)
    :param filter: extra MongoDB-style query terms; also used for the title
    """
    if type == "2d":
        results = persistence.retrieve_from_database(persistence.config["collection_name"]
                                                     , dict({"tag": tag}.items() + filter.items())
                                                     , {"_id": 0, params[0]: 1, params[1]: 1}, [(params[0], 1)])
        xs, ys = _get_corresponding_results(results, params[0], params[1])
        # title lists the filter items, e.g. " item:value item:value"
        title = ""
        for item in filter:
            title = title + " " + str(item.split(".")[1]) + ":" + str(filter[item])
        dataset = [xs,ys, title]
    if type == "3d":
        results = persistence.retrieve_from_database(persistence.config["collection_name"]
                                                     , dict({"tag": tag}.items() + filter.items())
                                                     , {"_id": 0, params[0]: 1, params[1]: 1, params[2]: 1}, [(params[0], 1)])
        xs, ys, zs = _get_corresponding_results_3d(results, params[0], params[1], params[2])
        title = ""
        for item in filter:
            title = title + " " + str(item.split(".")[1]) + ":" + str(filter[item])
        dataset = [xs, ys, zs, title]
    return dataset
def plot2d(db_config, tag, x_param, y_param, filter_list, numRows, numCols, title):
    # Fetch one 2-d dataset per filter and draw them in a numRows x numCols
    # grid of subplots.
    persistence = PersistenceManager(db_config)
    persistence.initialise_database_client()
    datasets = []
    for filter in filter_list:
        dataset = create_dataset(persistence, "2d", tag, [x_param, y_param], filter)
        datasets.append(dataset)
    # Axis labels use the "item" half of each dotted parameter path.
    create_plot_matrix(datasets, "2d", numRows, numCols, title, [str(x_param.split(".")[1]), y_param.split(".")[1]])
def plot3d(db_config, tag, x_param, y_param, z_param, filter_list, numRows, numCols, title):
    # Fetch one 3-d dataset per filter and draw them in a numRows x numCols
    # grid of 3-d subplots.
    persistence = PersistenceManager(db_config)
    persistence.initialise_database_client()
    datasets = []
    for filter in filter_list:
        dataset = create_dataset(persistence, "3d", tag, [x_param, y_param, z_param], filter)
        datasets.append(dataset)
    # Axis labels use the "item" half of each dotted parameter path.
    create_plot_matrix(datasets, "3d", numRows, numCols, title, [str(x_param.split(".")[1]), y_param.split(".")[1], z_param.split(".")[1]])
| apache-2.0 |
yl565/statsmodels | statsmodels/datasets/nile/data.py | 5 | 1907 | """Nile River Flows."""
__docformat__ = 'restructuredtext'

COPYRIGHT = """This is public domain."""
TITLE = """Nile River flows at Ashwan 1871-1970"""
SOURCE = """
This data is first analyzed in:

    Cobb, G. W. 1978. "The Problem of the Nile: Conditional Solution to a
    Changepoint Problem." *Biometrika*. 65.2, 243-51.
"""

DESCRSHORT = """This dataset contains measurements on the annual flow of
the Nile as measured at Ashwan for 100 years from 1871-1970."""

DESCRLONG = DESCRSHORT + " There is an apparent changepoint near 1898."

#suggested notes
# BUG FIX: the variable-name description previously read "volumne"; the
# actual column in nile.csv (and the endog name used below) is "volume".
NOTE = """::

    Number of observations: 100
    Number of variables: 2
    Variable name definitions:

        year - the year of the observations
        volume - the discharge at Aswan in 10^8, m^3
"""
from numpy import recfromtxt, array
from pandas import Series, DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
    """
    Load the Nile data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    data = _get_data()
    # The endogenous series is the annual discharge volume; the unused
    # `names = list(data.dtype.names)` local was removed.
    endog_name = 'volume'
    endog = array(data[endog_name], dtype=float)
    dataset = Dataset(data=data, names=[endog_name], endog=endog,
                      endog_name=endog_name)
    return dataset
def load_pandas():
    """Load the Nile data as a pandas-backed Dataset instance."""
    frame = DataFrame(_get_data())
    # TODO: time series
    # Volume indexed by integer year is used as the endogenous series.
    volume = Series(frame['volume'], index=frame['year'].astype(int))
    return Dataset(data=frame, names=list(frame.columns),
                   endog=volume, endog_name='volume')
def _get_data():
    """Read nile.csv (shipped next to this module) into a record array."""
    csv_path = dirname(abspath(__file__)) + '/nile.csv'
    with open(csv_path, 'rb') as handle:
        return recfromtxt(handle, delimiter=",",
                          names=True, dtype=float)
| bsd-3-clause |
bsipocz/astropy | astropy/visualization/wcsaxes/tests/test_wcsapi.py | 1 | 6661 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D, IdentityTransform
from astropy import units as u
from astropy.wcs.wcsapi import BaseLowLevelWCS
from astropy.coordinates import SkyCoord
from astropy.time import Time
from astropy.units import Quantity
from astropy.tests.image_tests import IMAGE_REFERENCE_DIR
from astropy.wcs import WCS
from astropy.visualization.wcsaxes.frame import RectangularFrame
from astropy.visualization.wcsaxes.wcsapi import (WCSWorld2PixelTransform,
transform_coord_meta_from_wcs)
# Fixture WCSes used by the round-trip tests below: simple linear 2-d and
# 3-d frames with kilometre units.
WCS2D = WCS(naxis=2)
WCS2D.wcs.ctype = ['x', 'y']
WCS2D.wcs.cunit = ['km', 'km']
WCS2D.wcs.crpix = [614.5, 856.5]
WCS2D.wcs.cdelt = [6.25, 6.25]
WCS2D.wcs.crval = [0., 0.]

WCS3D = WCS(naxis=3)
WCS3D.wcs.ctype = ['x', 'y', 'z']
WCS3D.wcs.cunit = ['km', 'km', 'km']
WCS3D.wcs.crpix = [614.5, 856.5, 333]
WCS3D.wcs.cdelt = [6.25, 6.25, 23]
WCS3D.wcs.crval = [0., 0., 1.]
def test_shorthand_inversion():
    """
    Test that the Matplotlib subtraction shorthand for composing and inverting
    transformations works.
    """
    # Two different celestial WCSes so t1 and t2 are distinct transforms.
    w1 = WCS(naxis=2)
    w1.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    w1.wcs.crpix = [256.0, 256.0]
    w1.wcs.cdelt = [-0.05, 0.05]
    w1.wcs.crval = [120.0, -19.0]

    w2 = WCS(naxis=2)
    w2.wcs.ctype = ['RA---SIN', 'DEC--SIN']
    w2.wcs.crpix = [256.0, 256.0]
    w2.wcs.cdelt = [-0.05, 0.05]
    w2.wcs.crval = [235.0, +23.7]

    t1 = WCSWorld2PixelTransform(w1)
    t2 = WCSWorld2PixelTransform(w2)

    # "a - b" should equal "a + b.inverted()", and composition order matters.
    assert t1 - t2 == t1 + t2.inverted()
    assert t1 - t2 != t2.inverted() + t1
    assert t1 - t1 == IdentityTransform()
# We add Affine2D to catch the fact that in Matplotlib, having a Composite
# transform can end up in more strict requirements for the dimensionality.
def test_2d():
    # Round-trip 2-d world coordinates through the transform and back.
    world = np.ones((10, 2))

    w1 = WCSWorld2PixelTransform(WCS2D) + Affine2D()
    pixel = w1.transform(world)
    world_2 = w1.inverted().transform(pixel)

    np.testing.assert_allclose(world, world_2)
def test_3d():
    # Slice the 3-d WCS down to two axes, then round-trip world coordinates.
    world = np.ones((10, 2))

    w1 = WCSWorld2PixelTransform(WCS3D[:, 0, :]) + Affine2D()
    pixel = w1.transform(world)
    world_2 = w1.inverted().transform(pixel)

    np.testing.assert_allclose(world[:, 0], world_2[:, 0])
    np.testing.assert_allclose(world[:, 1], world_2[:, 1])
# Mapping of FITS CTYPE values to the expected (coordinate type, preferred
# format unit, angle wrap) triple.
# NOTE(review): appears unused within this chunk — possibly consumed by code
# outside the visible region; confirm before removing.
CTYPE_CASES = [(' LON-TAN', ('longitude', None, None)),
               (' LAT-TAN', ('latitude', None, None)),
               ('HPLN-TAN', ('longitude', u.arcsec, 180.)),
               ('HPLT-TAN', ('latitude', u.arcsec, None)),
               ('RA---TAN', ('longitude', u.hourangle, None)),
               ('DEC--TAN', ('latitude', None, None)),
               ('spam', ('scalar', None, None))]
def test_coord_type_from_ctype():
    # Galactic coordinates: plain degrees, no longitude wrapping.
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ['GLON-TAN', 'GLAT-TAN']
    wcs.wcs.crpix = [256.0] * 2
    wcs.wcs.cdelt = [-0.05] * 2
    wcs.wcs.crval = [50.0] * 2

    _, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)

    assert coord_meta['type'] == ['longitude', 'latitude']
    assert coord_meta['format_unit'] == [u.deg, u.deg]
    assert coord_meta['wrap'] == [None, None]

    # Helioprojective: arcsecond format with longitude wrapped at 180 deg.
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ['HPLN-TAN', 'HPLT-TAN']
    wcs.wcs.crpix = [256.0] * 2
    wcs.wcs.cdelt = [-0.05] * 2
    wcs.wcs.crval = [50.0] * 2

    _, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)

    assert coord_meta['type'] == ['longitude', 'latitude']
    assert coord_meta['format_unit'] == [u.arcsec, u.arcsec]
    assert coord_meta['wrap'] == [180., None]

    # Equatorial: RA formatted in hour angle, Dec in degrees.
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    wcs.wcs.crpix = [256.0] * 2
    wcs.wcs.cdelt = [-0.05] * 2
    wcs.wcs.crval = [50.0] * 2

    _, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)

    assert coord_meta['type'] == ['longitude', 'latitude']
    assert coord_meta['format_unit'] == [u.hourangle, u.deg]
    assert coord_meta['wrap'] == [None, None]

    # Unrecognised ctypes fall back to dimensionless scalar axes.
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ['spam', 'spam']
    wcs.wcs.crpix = [256.0] * 2
    wcs.wcs.cdelt = [-0.05] * 2
    wcs.wcs.crval = [50.0] * 2

    _, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)

    assert coord_meta['type'] == ['scalar', 'scalar']
    assert coord_meta['format_unit'] == [u.one, u.one]
    assert coord_meta['wrap'] == [None, None]
class LowLevelWCS5D(BaseLowLevelWCS):
    """Minimal low-level WCS with 2 pixel and 5 world dimensions for tests.

    World values are produced by cycling the pixel arrays up to length 5 and
    applying fixed per-axis scale factors.
    """

    @property
    def pixel_n_dim(self):
        return 2

    @property
    def world_n_dim(self):
        return 5

    @property
    def world_axis_physical_types(self):
        return ['em.freq', 'time', 'pos.eq.ra', 'pos.eq.dec',
                'phys.polarization.stokes']

    @property
    def world_axis_units(self):
        return ['Hz', 'day', 'deg', 'deg', '']

    def pixel_to_world_values(self, *pixel_arrays):
        pixel_arrays = (list(pixel_arrays) * 3)[:-1]  # make list have 5 elements
        return [np.asarray(pix) * scale
                for pix, scale in zip(pixel_arrays, [10, 0.2, 0.4, 0.39, 2])]

    def array_index_to_world_values(self, *index_arrays):
        # BUG FIX: the reversed index arrays must be unpacked into separate
        # positional arguments; previously the whole tuple was passed as one
        # argument, breaking the *pixel_arrays protocol.
        return self.pixel_to_world_values(*index_arrays[::-1])[::-1]

    def world_to_pixel_values(self, *world_arrays):
        world_arrays = world_arrays[:2]  # make list have 2 elements
        return [np.asarray(world) / scale
                for world, scale in zip(world_arrays, [10, 0.2])]

    def world_to_array_index_values(self, *world_arrays):
        # BUG FIX: this method previously called itself, causing infinite
        # recursion; convert to pixel values, then reverse to array order.
        pixel_values = self.world_to_pixel_values(*world_arrays)
        return np.round(np.asarray(pixel_values)[::-1]).astype(int)

    @property
    def world_axis_object_components(self):
        # BUG FIX: a missing comma after the first 'celestial' entry made it
        # a call expression -- (tuple)(tuple) -- raising TypeError on access.
        return [('freq', 0, 'value'),
                ('time', 0, 'mjd'),
                ('celestial', 0, 'spherical.lon.degree'),
                ('celestial', 1, 'spherical.lat.degree'),
                ('stokes', 0, 'value')]

    @property
    def world_axis_object_classes(self):
        return {'celestial': (SkyCoord, (), {'unit': 'deg'}),
                'time': (Time, (), {'format': 'mjd'}),
                'freq': (Quantity, (), {'unit': 'Hz'}),
                'stokes': (Quantity, (), {'unit': 'one'})}
class TestWCSAPI:
    # Image-comparison test exercising WCSAxes against the 5-world-dimension,
    # 2-pixel-dimension low-level WCS defined above.
    @pytest.mark.remote_data(source='astropy')
    @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
                                   tolerance=0, style={})
    def test_wcsapi_5d(self):
        # Test for plotting image and also setting values of ticks
        fig = plt.figure(figsize=(6, 6))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=LowLevelWCS5D())
        ax.set_xlim(-0.5, 148.5)
        ax.set_ylim(-0.5, 148.5)
        return fig
| bsd-3-clause |
DEK11/Predicting-EOB-delay | withoutpayer.py | 1 | 2272 | import pandas as pd
import numpy as np

# Load the training and test splits (headered CSVs in the working directory).
train = pd.read_csv('train.csv', header=0)
test = pd.read_csv('test.csv', header=0)

# Columns dropped before modelling: date parts and payer identifiers.
delcol = ['claim_file_arrival_year','claim_file_arrival_month','bill_print_year','bill_print_month','claim_min_service_year','claim_max_service_year','claim_frequency_type_code','claim_min_service_month','claim_payer_name']
train = train.drop(delcol, axis=1)
test = test.drop(delcol, axis=1)

# Separate the regression target (Delay) from the feature columns.
trainDelay = train.Delay
testDelay = test.Delay
train = train.drop(['Delay'], axis=1)
test = test.drop(['Delay'], axis=1)

# Standardise each feature column; the scaler is fit on train only and then
# applied to test, avoiding leakage.
from sklearn import preprocessing
for col in train.columns:
    scaler = preprocessing.StandardScaler()
    train[col] = scaler.fit_transform(train[col])
    test[col] = scaler.transform(test[col])

# Train an XGBoost linear-objective regressor on the delay target.
import xgboost as xgb
dtrain = xgb.DMatrix(train.values, label = trainDelay)
#train = []
dtrain.save_binary("dtrain.buffer")
#dtrain = xgb.DMatrix('dtrain.buffer')

params = {'objective': 'reg:linear',
          'eta' : 0.01,
          'max_depth' : 10,
          'subsample' : 0.9,
          'colsample_bytree': 0.9}
num_rounds = 3000

model = xgb.train(params, dtrain, num_rounds)
model.save_model('xgbmodel.model')
#model = xgb.Booster({'nthread':4})
#model.load_model("xgbmodel.model")

# Score the held-out split and save predicted vs. actual delays to CSV.
dtest = xgb.DMatrix(test.values)
dtest.save_binary("dtest.buffer")
#dtest = xgb.DMatrix('dtest.buffer')
#test = []
pred2 = model.predict(dtest)

df2 = pd.DataFrame()
df2["Orginal"] = testDelay
df2["Predicted"] = pred2
df2.to_csv('compareDelay.csv', index = False)

import matplotlib.pyplot as plt
plt.style.use("ggplot")

# Map xgboost's generic feature names (f0, f1, ...) back to column names and
# sort features by importance score.
mapper = { 'f{0}' . format (I): v for I, v in enumerate (train.columns)}
mapped = {mapper [k]: v for k, v in model.get_fscore().items()}
import operator
mapped = sorted(mapped.items(), key=operator.itemgetter(1))

xgb.plot_importance(model)
plt.show()

# Normalised feature-importance bar chart saved to disk.
df = pd.DataFrame(mapped, columns=['feature', 'fscore'])
df['fscore'] = df['fscore'] / df['fscore'].sum()
df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(25, 15))
plt.title('XGBoost Feature Importance')
plt.xlabel('relative importance')
plt.gcf().savefig('feature_importance_xgb.png')

# Scatter of predicted vs. actual delays against the ideal y = x line.
xx = np.linspace(-10,500)
yy = xx
h0 = plt.plot(xx, yy, 'k-', label="ideal Values")
plt.scatter(df2.Orginal, df2.Predicted, c = 'y')
plt.legend()
plt.show()
| apache-2.0 |
blab/nextstrain-db | analysis/HIxFRA_plot.py | 2 | 2607 | import matplotlib.pyplot as plt
import seaborn as sns; sns.set(color_codes=True)
import numpy as np
import math
import argparse
# Command-line interface: input titer file plus regression type selection.
parser = argparse.ArgumentParser()
parser.add_argument('--infile', default=None, type=str, help="file to graph")
parser.add_argument('--rtype', default="linear", type=str, help="type of regression: linear or lowess")
def x_y(fname, rtype):
    """Parse a tab-separated titer table and return (hi_drops, fra_drops).

    Each row holds (test virus, reference virus, serum, ..., titer, assay)
    in columns 0-5, where the assay (column 5) is 'hi' or 'fra'.  For every
    strain measured by both assays against a serum that also has a
    homologous (reference == test) measurement, the log2 titer drop relative
    to that reference is computed.  For rtype == "lowess" one (0, 0) anchor
    point is added per reference measurement.
    """
    hi, fra = [], []
    hi_v, fra_v = {}, {}
    # `with` guarantees the input file is closed (the original leaked it).
    with open(fname, 'r') as o:
        for line in o.readlines():
            l = line.split('\t')
            tup = (l[0], l[1], l[2])
            if l[5] == 'hi':
                hi.append(tup)
                hi_v[tup] = float(l[4])
            elif l[5] == 'fra':
                fra.append(tup)
                fra_v[tup] = float(l[4])
    # Keep only measurements present in both assays.
    paired = []
    for t in hi:
        if t in fra:
            paired.append(t)
    # Homologous reference titers, as log2, keyed by serum.
    ferret_ref_hi = {}
    ferret_ref_fra = {}
    x = []
    y = []
    for t in paired:
        if t[0] == t[1]:
            ferret_ref_hi[t[2]] = math.log(hi_v[t], 2)
            ferret_ref_fra[t[2]] = math.log(fra_v[t], 2)
            if rtype == "lowess":
                # References have zero drop by definition; anchor the curve.
                x.append(0)
                y.append(0)
    print("references:", len(x))
    count = 0
    for t in paired:
        if (t[0] != t[1]) and (t[2] in ferret_ref_hi):
            # BUG FIX: drops previously subtracted base-2 reference values
            # from natural-log titers; use log2 consistently, matching the
            # two-fold dilution series of the assays.
            hi_drop = -(math.log(hi_v[t], 2) - ferret_ref_hi[t[2]])
            fra_drop = -(math.log(fra_v[t], 2) - ferret_ref_fra[t[2]])
            x.append(hi_drop)
            if hi_drop < 0:
                count += 1
            y.append(fra_drop)
    print("total:", len(x))
    print("count:", count)
    return x, y
def plot_it(x,y,rtype):
    # Render either a lowess-smoothed or linear-regression comparison of the
    # HI and FRA titer drops; jitter makes overlapping points visible.
    x = np.asarray(x)
    y = np.asarray(y)
    if rtype == "lowess":
        ax = sns.regplot(x=x, y=y, color="b", scatter_kws={'alpha':0.3}, x_jitter=.25, y_jitter=.25, lowess=True)
        ax.set(xlabel='HI Titer Drop', ylabel='FRA Titer Drop')
        ax.set_title('HI vs. FRA Titer Drops with Lowess Curve')
        plt.show()
    else:
        ax = sns.regplot(x=x, y=y, color="r", scatter_kws={'alpha':0.3}, x_jitter=.25, y_jitter=.1, fit_reg=True, ci=95)
        ax.set(xlabel='HI Titer Drop', ylabel='FRA Titer Drop')
        ax.set_title('HI vs. FRA Titer Drops with Linear Regression')
        # Print the fitted slope and intercept for reference.
        regression = np.polyfit(x, y, 1)
        print(regression)
        plt.show()
if __name__=="__main__":
    # Parse CLI arguments, extract the paired drops, and plot them.
    args = parser.parse_args()
    f = args.infile
    r = args.rtype
    x, y = x_y(f, r)
    plot_it(x, y, r)
| agpl-3.0 |
mehdidc/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
    """Render a parsed NumPy-format docstring as Sphinx-flavoured ReST."""

    def __init__(self, docstring, config=None):
        # Record the plot-directive preference before delegating parsing to
        # the base class.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # Sphinx output uses rubric directives rather than underlined
        # headers; `symbol` is kept for base-class interface compatibility.
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        # Return a copy of `doc` with every line indented by `indent` spaces.
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out

    def _str_signature(self):
        # NOTE(review): the unconditional return below deliberately disables
        # signature output; everything after it is unreachable.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        # Render a parameter-style section (Parameters, Returns, ...) as a
        # Sphinx field list with bold parameter names.
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc, 8)
                out += ['']
        return out

    @property
    def _obj(self):
        # The documented object, whichever attribute the subclass stored
        # it in (_cls for classes, _f for functions).
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None

    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')

            if prefix:
                prefix = '~%s.' % prefix

            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Members that actually exist on the object can go into an
                # autosummary table; anything else gets a manual table below.
                if not self._obj or hasattr(self._obj, param):
                    autosum += ["   %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))

            if autosum:
                # GAEL: Toctree commented out below because it creates
                # hundreds of sphinx warnings
                # out += ['.. autosummary::', '   :toctree:', '']
                out += ['.. autosummary::', '']
                out += autosum

            if others:
                # Fixed-width ReST table for members without attributes on
                # the documented object.
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "=" * maxlen_0 + "  " + "=" * maxlen_1 + "  " + "=" * 10
                fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            # Dedent so the section body lines up with the rubric.
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # Skip the base class's two header lines and indent the rest
            # under the seealso directive.
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out

        out += ['.. index:: %s' % idx.get('default', '')]
        # BUG FIX: dict.iteritems() does not exist on Python 3; items()
        # behaves identically for this read-only iteration on both versions.
        for section, references in idx.items():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += ['   single: %s' % (', '.join(references))]
            else:
                out += ['   %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            import sphinx  # local import to avoid test dependency
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out

    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])

        # Wrap matplotlib-based examples in a plot directive unless they
        # already contain one.
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')

    def __str__(self, indent=0, func_role="obj"):
        # Assemble every section in canonical order, then apply the overall
        # indentation at the very end.
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Methods',):
            out += self._str_member_list(param_list)
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    # Sphinx renderer for function docstrings.  The shared mutable default
    # for `config` is only read (get), never mutated, so it is harmless.
    def __init__(self, obj, doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    # Sphinx renderer for class docstrings.
    def __init__(self, obj, doc=None, func_doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): the func_doc argument is accepted but not forwarded;
        # ClassDoc is always given func_doc=None here — confirm intentional.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    # Fallback renderer for arbitrary objects with a pre-fetched docstring.
    def __init__(self, obj, doc=None, config=None):
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
    """Return the appropriate Sphinx documenter for *obj*.

    When *what* is not given it is inferred from the object's kind
    (class, module, callable, or plain object).
    """
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    if what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    # Everything else: fetch the docstring ourselves if needed and fall back
    # to the generic object documenter.
    if doc is None:
        doc = pydoc.getdoc(obj)
    return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
alekz112/statsmodels | statsmodels/examples/ex_misc_tarma.py | 34 | 1875 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 03 23:01:44 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
from statsmodels.tsa.arima_process import arma_generate_sample, ArmaProcess
from statsmodels.miscmodels.tmodel import TArma
from statsmodels.tsa.arima_model import ARMA
nobs = 500
ar = [1, -0.6, -0.1]
ma = [1, 0.7]
dist = lambda n: np.random.standard_t(3, size=n)
np.random.seed(8659567)
x = arma_generate_sample(ar, ma, nobs, sigma=1, distrvs=dist,
burnin=500)
mod = TArma(x)
order = (2, 1)
res = mod.fit(order=order)
res2 = mod.fit_mle(order=order, start_params=np.r_[res[0], 5, 1], method='nm')
print(res[0])
proc = ArmaProcess.from_coeffs(res[0][:order[0]], res[0][:order[1]])
print(ar, ma)
proc.nobs = nobs
# TODO: bug nobs is None, not needed ?, used in ArmaProcess.__repr__
print(proc.ar, proc.ma)
print(proc.ar_roots(), proc.ma_roots())
from statsmodels.tsa.arma_mle import Arma
modn = Arma(x)
resn = modn.fit_mle(order=order)
moda = ARMA(x, order=order)
resa = moda.fit( trend='nc')
print('\nparameter estimates')
print('ls ', res[0])
print('norm', resn.params)
print('t ', res2.params)
print('A ', resa.params)
print('\nstandard deviation of parameter estimates')
#print 'ls ', res[0] #TODO: not available yet
print('norm', resn.bse)
print('t ', res2.bse)
print('A ', resa.bse)
print('A/t-1', resa.bse / res2.bse[:3] - 1)
print('other bse')
print(resn.bsejac)
print(resn.bsejhj)
print(res2.bsejac)
print(res2.bsejhj)
print(res2.t_test(np.eye(len(res2.params))))
# TArma has no fittedvalues and resid
# TODO: check if lag is correct or if fitted `x-resid` is shifted
resid = res2.model.geterrors(res2.params)
fv = res[2]['fvec'] #resid returned from leastsq?
import matplotlib.pyplot as plt
plt.plot(x, 'o', alpha=0.5)
plt.plot(x-resid)
plt.plot(x-fv)
#plt.show()
| bsd-3-clause |
phdowling/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
    # An unknown method name must be rejected with ValueError.
    assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
    # Run the shared checks against every sampling backend.
    methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")

    for m in methods:
        # Late binding of `m` is safe: the checks run within this iteration.
        def sample_without_replacement_method(n_population, n_samples,
                                              random_state=None):
            return sample_without_replacement(n_population, n_samples,
                                              method=m,
                                              random_state=random_state)

        check_edge_case_of_sample_int(sample_without_replacement_method)
        check_sample_int(sample_without_replacement_method)
        check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
    assert_raises(ValueError, sample_without_replacement, 0, 1)
    assert_raises(ValueError, sample_without_replacement, 1, 2)

    # n_population == n_samples
    assert_equal(sample_without_replacement(0, 0).shape, (0, ))

    assert_equal(sample_without_replacement(1, 1).shape, (1, ))

    # n_population >= n_samples
    assert_equal(sample_without_replacement(5, 0).shape, (0, ))
    assert_equal(sample_without_replacement(5, 1).shape, (1, ))

    # n_population < 0 or n_samples < 0
    assert_raises(ValueError, sample_without_replacement, -1, 5)
    assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # the sample is of the correct length and contains only unique items.
    n_population = 100

    for n_samples in range(n_population + 1):
        s = sample_without_replacement(n_population, n_samples)
        assert_equal(len(s), n_samples)
        unique = np.unique(s)
        assert_equal(np.size(unique), n_samples)
        assert_true(np.all(unique < n_population))

    # test edge case n_population == n_samples == 0
    assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # sampling eventually generates all possible combinations.
    n_population = 10

    # a large number of trials prevents false negatives without slowing normal
    # case
    n_trials = 10000

    for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
        n_expected = combinations(n_population, n_samples, exact=True)

        output = {}
        for i in range(n_trials):
            output[frozenset(sample_without_replacement(n_population,
                                                        n_samples))] = None

            if len(output) == n_expected:
                break
        else:
            # for/else: only reached when the break above never fired.
            raise AssertionError(
                "number of combinations != number of expected (%s != %s)" %
                (len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
    # Explicit class probabilities: empirical frequencies should match.
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]

    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # Implicit class probabilities (uniform over the given classes)
    classes = [[0, 1], [1, 2]]  # test for array-like support
    class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]

    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # Edge case: probabilities of exactly 1.0 and 0.0
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]

    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel(),
                        minlength=len(class_probabilites[k])) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # One class target data
    classes = [[1], [0]]  # test for array-like support
    class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]

    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    # the length of an array in classes and class_probabilites is mismatched
    classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # the class dtype is not supported (strings)
    classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # the class dtype is not supported (floats)
    classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # Given probabilities don't sum to 1
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)
| bsd-3-clause |
huard/scipy-work | scipy/io/examples/read_array_demo1.py | 2 | 1440 | #=========================================================================
# NAME: read_array_demo1
#
# DESCRIPTION: Examples to read 2 columns from a multicolumn ascii text
# file, skipping the first line of header. First example reads into
# 2 separate arrays. Second example reads into a single array. Data are
# then plotted.
#
# Here is the format of the file test.txt:
# --------
# Some header to skip
# 1 2 3
# 2 4 6
# 3 6 9
# 4 8 12
#
# USAGE:
# python read_array_demo1.py
#
# PARAMETERS:
#
# DEPENDENCIES:
# matplotlib (pylab)
# test.txt
#
#
# AUTHOR: Simon J. Hook
# DATE : 09/23/2005
#
# MODIFICATION HISTORY:
#
# COMMENT:
#
#============================================================================
from scipy import *
from scipy.io import read_array
from pylab import *
def main():
    # First example, read first and second column from ascii file. Skip first
    # line of header.
    # Note use of (1,-1) in lines to skip first line and then read to end of file
    # Note use of (0,) in columns to pick first column; since it's a tuple it
    # needs a trailing comma
    x=read_array("test.txt",lines=(1,-1), columns=(0,))
    y=read_array("test.txt",lines=(1,-1), columns=(1,))

    # Second example, read the file into a single array
    z=read_array("test.txt",lines=(1,-1), columns=(0,2))

    # Plot the data
    plot(x,y,'r--',z[:,0],z[:,1])
    show()
# The one and only main function; run only when executed as a script.
if __name__ == "__main__":
    main()
| bsd-3-clause |
mayblue9/scikit-learn | sklearn/decomposition/nmf.py | 35 | 39369 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Mathieu Blondel <mathieu@mblondel.org>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..utils import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
def safe_vstack(Xs):
    """Stack blocks vertically, staying sparse if any block is sparse."""
    has_sparse_block = any(sp.issparse(block) for block in Xs)
    return sp.vstack(Xs) if has_sparse_block else np.vstack(Xs)
def norm(x):
    """Euclidean norm of *x* computed via a dot product.

    See: http://fseoane.net/blog/2011/computing-the-vector-norm/
    """
    squared = squared_norm(x)
    return sqrt(squared)
def trace_dot(X, Y):
    """Trace of np.dot(X, Y.T), i.e. the sum of elementwise products."""
    flat_x = X.ravel()
    flat_y = Y.ravel()
    return flat_x.dot(flat_y)
def _sparseness(x):
    """Hoyer's measure of sparsity for a vector (1 = sparsest, 0 = densest)."""
    root_n = np.sqrt(len(x))
    l1_over_l2 = np.linalg.norm(x, 1) / norm(x)
    return (root_n - l1_over_l2) / (root_n - 1)
def _check_init(A, shape, whom):
    """Validate a user-supplied initialization matrix for *whom*.

    Checks shape, non-negativity, and that the array is not all zeros.
    """
    A = check_array(A)
    actual_shape = np.shape(A)
    if actual_shape != shape:
        raise ValueError('Array with wrong shape passed to %s. Expected %s, '
                         'but got %s ' % (whom, shape, actual_shape))
    check_non_negative(A, whom)
    if np.max(A) == 0:
        raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
    """Frobenius norm of (X - WH), computed without densifying a sparse X."""
    if sp.issparse(X):
        # Expand ||X - WH||^2 = ||X||^2 + ||WH||^2 - 2 <X, WH> so that only
        # sparse-friendly products are needed.
        norm_X = np.dot(X.data, X.data)
        norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
        cross_prod = trace_dot((X * H.T), W)
        return sqrt(norm_X + norm_WH - 2. * cross_prod)
    return norm(X - np.dot(W, H))
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
                    random_state=None):
    """Algorithms for NMF initialization.
    Computes an initial guess for the non-negative
    rank k matrix approximation for X: X = WH
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data matrix to be decomposed.
    n_components : integer
        The number of components desired in the approximation.
    init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
        Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
        Valid options:
        'random': non-negative random matrices, scaled with:
            sqrt(X.mean() / n_components)
        'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
            initialization (better for sparseness)
        'nndsvda': NNDSVD with zeros filled with the average of X
            (better when sparsity is not desired)
        'nndsvdar': NNDSVD with zeros filled with small random values
            (generally faster, less accurate alternative to NNDSVDa
            for when sparsity is not desired)
    eps: float
        Truncate all values less then this in output to zero.
    random_state : int seed, RandomState instance, or None (default)
        Random number generator seed control, used in 'nndsvdar' and
        'random' modes.
    Returns
    -------
    W : array-like, shape (n_samples, n_components)
        Initial guesses for solving X ~= WH
    H : array-like, shape (n_components, n_features)
        Initial guesses for solving X ~= WH
    References
    ----------
    C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
    nonnegative matrix factorization - Pattern Recognition, 2008
    http://tinyurl.com/nndsvd
    """
    check_non_negative(X, "NMF initialization")
    n_samples, n_features = X.shape
    if init is None:
        if n_components < n_features:
            init = 'nndsvd'
        else:
            init = 'random'
    # Random initialization
    if init == 'random':
        # Scale so that W H has roughly the same mean as X.
        avg = np.sqrt(X.mean() / n_components)
        rng = check_random_state(random_state)
        H = avg * rng.randn(n_components, n_features)
        W = avg * rng.randn(n_samples, n_components)
        # we do not write np.abs(H, out=H) to stay compatible with
        # numpy 1.5 and earlier where the 'out' keyword is not
        # supported as a kwarg on ufuncs
        np.abs(H, H)
        np.abs(W, W)
        return W, H
    # NNDSVD initialization
    U, S, V = randomized_svd(X, n_components, random_state=random_state)
    W, H = np.zeros(U.shape), np.zeros(V.shape)
    # The leading singular triplet is non-negative
    # so it can be used as is for initialization.
    W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
    H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
    # For each remaining singular vector pair, keep whichever sign pattern
    # (positive or negative part) carries more energy (Boutsidis &
    # Gallopoulos, 2008).
    for j in range(1, n_components):
        x, y = U[:, j], V[j, :]
        # extract positive and negative parts of column vectors
        x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
        x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
        # and their norms
        x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
        x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
        m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
        # choose update
        if m_p > m_n:
            u = x_p / x_p_nrm
            v = y_p / y_p_nrm
            sigma = m_p
        else:
            u = x_n / x_n_nrm
            v = y_n / y_n_nrm
            sigma = m_n
        lbd = np.sqrt(S[j] * sigma)
        W[:, j] = lbd * u
        H[j, :] = lbd * v
    # Truncate tiny entries to exact zeros.
    W[W < eps] = 0
    H[H < eps] = 0
    if init == "nndsvd":
        pass
    elif init == "nndsvda":
        # Fill zeros with the data mean (denser initialization).
        avg = X.mean()
        W[W == 0] = avg
        H[H == 0] = avg
    elif init == "nndsvdar":
        # Fill zeros with small random perturbations around the data mean.
        rng = check_random_state(random_state)
        avg = X.mean()
        W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
        H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
    else:
        raise ValueError(
            'Invalid init parameter: got %r instead of one of %r' %
            (init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
    return W, H
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
                    sigma=0.01, beta=0.1):
    """Non-negative least square solver
    Solves a non-negative least squares subproblem using the projected
    gradient descent algorithm.
    Parameters
    ----------
    V : array-like, shape (n_samples, n_features)
        Constant matrix.
    W : array-like, shape (n_samples, n_components)
        Constant matrix.
    H : array-like, shape (n_components, n_features)
        Initial guess for the solution.
    tol : float
        Tolerance of the stopping condition.
    max_iter : int
        Maximum number of iterations before timing out.
    alpha : double, default: 0.
        Constant that multiplies the regularization terms. Set it to zero to
        have no regularization.
    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L2 penalty.
        For l1_ratio = 1 it is an L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
    sigma : float
        Constant used in the sufficient decrease condition checked by the line
        search. Smaller values lead to a looser sufficient decrease condition,
        thus reducing the time taken by the line search, but potentially
        increasing the number of iterations of the projected gradient
        procedure. 0.01 is a commonly used value in the optimization
        literature.
    beta : float
        Factor by which the step size is decreased (resp. increased) until
        (resp. as long as) the sufficient decrease condition is satisfied.
        Larger values allow to find a better step size but lead to longer line
        search. 0.1 is a commonly used value in the optimization literature.
    Returns
    -------
    H : array-like, shape (n_components, n_features)
        Solution to the non-negative least squares problem.
    grad : array-like, shape (n_components, n_features)
        The gradient.
    n_iter : int
        The number of iterations done by the algorithm.
    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/
    """
    # Precompute the constant pieces of the gradient: grad = WtW.H - WtV.
    WtV = safe_sparse_dot(W.T, V)
    WtW = fast_dot(W.T, W)
    # values justified in the paper (alpha is renamed gamma)
    gamma = 1
    for n_iter in range(1, max_iter + 1):
        grad = np.dot(WtW, H) - WtV
        # Add the regularization term's gradient (constant for pure L1,
        # alpha * (l1_ratio + (1 - l1_ratio) * H) for the elastic-net mix).
        if alpha > 0 and l1_ratio == 1.:
            grad += alpha
        elif alpha > 0:
            grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
        # The following multiplication with a boolean array is more than twice
        # as fast as indexing into grad.
        if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
            break
        Hp = H
        # Backtracking line search on the step size gamma: shrink (or grow)
        # by a factor of beta until the sufficient decrease condition holds.
        for inner_iter in range(20):
            # Gradient step.
            Hn = H - gamma * grad
            # Projection step.
            Hn *= Hn > 0
            d = Hn - H
            gradd = np.dot(grad.ravel(), d.ravel())
            dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
            # Sufficient decrease condition from Lin (2007).
            suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
            if inner_iter == 0:
                # First trial decides whether to shrink or grow gamma.
                decr_gamma = not suff_decr
            if decr_gamma:
                if suff_decr:
                    H = Hn
                    break
                else:
                    gamma *= beta
            elif not suff_decr or (Hp == Hn).all():
                H = Hp
                break
            else:
                gamma /= beta
                Hp = Hn
    if n_iter == max_iter:
        warnings.warn("Iteration limit reached in nls subproblem.")
    return H, grad, n_iter
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
                                 sparseness, beta, eta):
    """Helper function for _fit_projected_gradient: one NLS solve for W.
    Depending on `sparseness`, the NLS subproblem is augmented with extra
    rows that encode the sparsity/correctness penalty (Hoyer 2004).
    """
    n_samples, n_features = X.shape
    n_components_ = H.shape[0]
    if sparseness is None:
        target = X.T
        basis = H.T
    elif sparseness == 'data':
        target = safe_vstack([X.T, np.zeros((1, n_samples))])
        basis = safe_vstack([H.T,
                             np.sqrt(beta) * np.ones((1, n_components_))])
    elif sparseness == 'components':
        target = safe_vstack([X.T, np.zeros((n_components_, n_samples))])
        basis = safe_vstack([H.T,
                             np.sqrt(eta) * np.eye(n_components_)])
    Wt, gradW, iterW = _nls_subproblem(target, basis, W.T, tolW, nls_max_iter,
                                       alpha=alpha, l1_ratio=l1_ratio)
    return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
                                 sparseness, beta, eta):
    """Helper function for _fit_projected_gradient: one NLS solve for H.
    Mirrors _update_projected_gradient_w, with the penalty rows built from
    W instead of H.
    """
    n_samples, n_features = X.shape
    n_components_ = W.shape[1]
    if sparseness is None:
        target = X
        basis = W
    elif sparseness == 'data':
        target = safe_vstack([X, np.zeros((n_components_, n_features))])
        basis = safe_vstack([W,
                             np.sqrt(eta) * np.eye(n_components_)])
    elif sparseness == 'components':
        target = safe_vstack([X, np.zeros((1, n_features))])
        basis = safe_vstack([W,
                             np.sqrt(beta)
                             * np.ones((1, n_components_))])
    return _nls_subproblem(target, basis, H, tolH, nls_max_iter,
                           alpha=alpha, l1_ratio=l1_ratio)
def _fit_projected_gradient(X, W, H, tol, max_iter,
                            nls_max_iter, alpha, l1_ratio,
                            sparseness, beta, eta):
    """Compute Non-negative Matrix Factorization (NMF) with Projected Gradient
    Alternates NLS solves for W and H until the projected gradient norm,
    relative to its initial value, drops below `tol`.
    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/
    P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
    Journal of Machine Learning Research 2004.
    """
    # Initial gradients of 0.5 * ||X - WH||^2 w.r.t. W and H.
    gradW = (np.dot(W, np.dot(H, H.T))
             - safe_sparse_dot(X, H.T, dense_output=True))
    gradH = (np.dot(np.dot(W.T, W), H)
             - safe_sparse_dot(W.T, X, dense_output=True))
    init_grad = squared_norm(gradW) + squared_norm(gradH.T)
    # max(0.001, tol) to force alternating minimizations of W and H
    tolW = max(0.001, tol) * np.sqrt(init_grad)
    tolH = tolW
    for n_iter in range(1, max_iter + 1):
        # stopping condition
        # as discussed in paper
        proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
        proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
        if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
            break
        # update W
        W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
                                                       nls_max_iter,
                                                       alpha, l1_ratio,
                                                       sparseness, beta, eta)
        # If the subproblem converged in one iteration, tighten its tolerance.
        if iterW == 1:
            tolW = 0.1 * tolW
        # update H
        H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
                                                       nls_max_iter,
                                                       alpha, l1_ratio,
                                                       sparseness, beta, eta)
        if iterH == 1:
            tolH = 0.1 * tolH
    H[H == 0] = 0  # fix up negative zeros
    if n_iter == max_iter:
        # One final W solve at the full tolerance before giving up.
        W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
                                               alpha, l1_ratio, sparseness,
                                               beta, eta)
    return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, alpha, l1_ratio, shuffle,
                               random_state):
    """Helper function for _fit_coordinate_descent
    Update W to minimize the objective function, iterating once over all
    coordinates. By symmetry, to update H, one can call
    _update_coordinate_descent(X.T, Ht, W, ...)
    """
    n_components = Ht.shape[1]
    gram = fast_dot(Ht.T, Ht)
    cross = safe_sparse_dot(X, Ht)
    # Split alpha into its L1 and L2 parts.
    l1_reg = 1. * l1_ratio * alpha
    l2_reg = (1. - l1_ratio) * alpha
    # L2 regularization corresponds to increasing the diagonal of Ht.T Ht.
    if l2_reg != 0.:
        gram.flat[::n_components + 1] += l2_reg
    # L1 regularization corresponds to decreasing each element of X Ht.
    if l1_reg != 0.:
        cross -= l1_reg
    seed = random_state.randint(np.iinfo(np.int32).max)
    return _update_cdnmf_fast(W, gram, cross, shuffle, seed)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
                            l1_ratio=0., regularization=None, update_H=True,
                            verbose=0, shuffle=False, random_state=None):
    """Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
    The objective function is minimized with an alternating minimization of W
    and H. Each minimization is done with a cyclic (up to a permutation of the
    features) Coordinate Descent.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Constant matrix.
    W : array-like, shape (n_samples, n_components)
        Initial guess for the solution.
    H : array-like, shape (n_components, n_features)
        Initial guess for the solution.
    tol : float, default: 1e-4
        Tolerance of the stopping condition.
    max_iter : integer, default: 200
        Maximum number of iterations before timing out.
    alpha : double, default: 0.
        Constant that multiplies the regularization terms.
    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L2 penalty.
        For l1_ratio = 1 it is an L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
    regularization : 'both' | 'components' | 'transformation' | None
        Select whether the regularization affects the components (H), the
        transformation (W), both or none of them.
    update_H : boolean, default: True
        Set to True, both W and H will be estimated from initial guesses.
        Set to False, only W will be estimated.
    verbose : integer, default: 0
        The verbosity level.
    shuffle : boolean, default: False
        If True, the samples will be taken in shuffled order during
        coordinate descent.
    random_state : integer seed, RandomState instance, or None (default)
        Random number generator seed control.
    Returns
    -------
    W : array-like, shape (n_samples, n_components)
        Solution to the non-negative least squares problem.
    H : array-like, shape (n_components, n_features)
        Solution to the non-negative least squares problem.
    n_iter : int
        The number of iterations done by the algorithm.
    References
    ----------
    Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.
    """
    # so W and Ht are both in C order in memory
    Ht = check_array(H.T, order='C')
    X = check_array(X, accept_sparse='csr')
    # Map the `regularization` option onto per-matrix alphas.
    alpha_H = 0.
    alpha_W = 0.
    if regularization in ('both', 'components'):
        alpha_H = float(alpha)
    if regularization in ('both', 'transformation'):
        alpha_W = float(alpha)
    rng = check_random_state(random_state)
    for n_iter in range(max_iter):
        # `violation` accumulates the projected-gradient magnitude of the
        # coordinate updates; convergence is tested relative to the first
        # iteration's value.
        violation = 0.
        # Update W
        violation += _update_coordinate_descent(X, W, Ht, alpha_W,
                                                l1_ratio, shuffle, rng)
        # Update H
        if update_H:
            violation += _update_coordinate_descent(X.T, Ht, W, alpha_H,
                                                    l1_ratio, shuffle, rng)
        if n_iter == 0:
            violation_init = violation
        if violation_init == 0:
            break
        if verbose:
            print("violation:", violation / violation_init)
        if violation / violation_init <= tol:
            if verbose:
                print("Converged at iteration", n_iter + 1)
            break
    return W, Ht.T, n_iter
def non_negative_factorization(X, W=None, H=None, n_components=None,
                               init='random', update_H=True, solver='cd',
                               tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
                               regularization=None, random_state=None,
                               verbose=0, shuffle=False, nls_max_iter=2000,
                               sparseness=None, beta=1, eta=0.1):
    """Compute Non-negative Matrix Factorization (NMF)
    Find two non-negative matrices (W, H) whose product approximates the non-
    negative matrix X. This factorization can be used for example for
    dimensionality reduction, source separation or topic extraction.
    The objective function is::
        0.5 * ||X - WH||_Fro^2
        + alpha * l1_ratio * ||vec(W)||_1
        + alpha * l1_ratio * ||vec(H)||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
        + 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
    Where::
        ||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
        ||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
    The objective function is minimized with an alternating minimization of W
    and H. If H is given and update_H=False, it solves for W only.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Constant matrix.
    W : array-like, shape (n_samples, n_components)
        If init='custom', it is used as initial guess for the solution.
    H : array-like, shape (n_components, n_features)
        If init='custom', it is used as initial guess for the solution.
        If update_H=False, it is used as a constant, to solve for W only.
    n_components : integer
        Number of components, if n_components is not set all features
        are kept.
    init :  None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
        Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
        Valid options::
        'random': non-negative random matrices, scaled with:
            sqrt(X.mean() / n_components)
        'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
            initialization (better for sparseness)
        'nndsvda': NNDSVD with zeros filled with the average of X
            (better when sparsity is not desired)
        'nndsvdar': NNDSVD with zeros filled with small random values
            (generally faster, less accurate alternative to NNDSVDa
            for when sparsity is not desired)
        'custom': use custom matrices W and H
    update_H : boolean, default: True
        Set to True, both W and H will be estimated from initial guesses.
        Set to False, only W will be estimated.
    solver : 'pg' | 'cd'
        Numerical solver to use:
        'pg' is a (deprecated) Projected Gradient solver.
        'cd' is a Coordinate Descent solver.
    tol : float, default: 1e-4
        Tolerance of the stopping condition.
    max_iter : integer, default: 200
        Maximum number of iterations before timing out.
    alpha : double, default: 0.
        Constant that multiplies the regularization terms.
    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an elementwise L2 penalty
        (aka Frobenius Norm).
        For l1_ratio = 1 it is an elementwise L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
    regularization : 'both' | 'components' | 'transformation' | None
        Select whether the regularization affects the components (H), the
        transformation (W), both or none of them.
    random_state : integer seed, RandomState instance, or None (default)
        Random number generator seed control.
    verbose : integer, default: 0
        The verbosity level.
    shuffle : boolean
        If True, the samples will be taken in shuffled order during
        coordinate descent.
    nls_max_iter : integer, default: 2000
        Number of iterations in NLS subproblem.
        Used only in the deprecated 'pg' solver.
    sparseness : 'data' | 'components' | None, default: None
        Where to enforce sparsity in the model.
        Used only in the deprecated 'pg' solver.
    beta : double, default: 1
        Degree of sparseness, if sparseness is not None. Larger values mean
        more sparseness. Used only in the deprecated 'pg' solver.
    eta : double, default: 0.1
        Degree of correctness to maintain, if sparsity is not None. Smaller
        values mean larger error. Used only in the deprecated 'pg' solver.
    Returns
    -------
    W : array-like, shape (n_samples, n_components)
        Solution to the non-negative least squares problem.
    H : array-like, shape (n_components, n_features)
        Solution to the non-negative least squares problem.
    n_iter : int
        Actual number of iterations.
    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/
    Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.
    """
    X = check_array(X, accept_sparse=('csr', 'csc'))
    check_non_negative(X, "NMF (input X)")
    _check_string_param(sparseness, solver)
    n_samples, n_features = X.shape
    if n_components is None:
        n_components = n_features
    # Validate scalar hyperparameters early, with explicit error messages.
    if not isinstance(n_components, six.integer_types) or n_components <= 0:
        raise ValueError("Number of components must be positive;"
                         " got (n_components=%r)" % n_components)
    if not isinstance(max_iter, numbers.Number) or max_iter < 0:
        raise ValueError("Maximum number of iteration must be positive;"
                         " got (max_iter=%r)" % max_iter)
    if not isinstance(tol, numbers.Number) or tol < 0:
        raise ValueError("Tolerance for stopping criteria must be "
                         "positive; got (tol=%r)" % tol)
    # check W and H, or initialize them
    if init == 'custom':
        _check_init(H, (n_components, n_features), "NMF (input H)")
        _check_init(W, (n_samples, n_components), "NMF (input W)")
    elif not update_H:
        # H is held constant; start W from zeros.
        _check_init(H, (n_components, n_features), "NMF (input H)")
        W = np.zeros((n_samples, n_components))
    else:
        W, H = _initialize_nmf(X, n_components, init=init,
                               random_state=random_state)
    if solver == 'pg':
        warnings.warn("'pg' solver will be removed in release 0.19."
                      " Use 'cd' solver instead.", DeprecationWarning)
        if update_H:  # fit_transform
            W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
                                                   max_iter,
                                                   nls_max_iter,
                                                   alpha, l1_ratio,
                                                   sparseness,
                                                   beta, eta)
        else:  # transform
            # BUG FIX: _update_projected_gradient_w returns
            # (W, gradient, n_iter); the previous code bound the gradient to
            # H and returned it as the "solution", contradicting the
            # documented return value. Discard the gradient and return the
            # caller-supplied constant H instead.
            W, _, n_iter = _update_projected_gradient_w(X, W, H,
                                                        tol, nls_max_iter,
                                                        alpha, l1_ratio,
                                                        sparseness, beta,
                                                        eta)
    elif solver == 'cd':
        W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
                                               max_iter,
                                               alpha, l1_ratio,
                                               regularization,
                                               update_H=update_H,
                                               verbose=verbose,
                                               shuffle=shuffle,
                                               random_state=random_state)
    else:
        raise ValueError("Invalid solver parameter '%s'." % solver)
    if n_iter == max_iter:
        warnings.warn("Maximum number of iteration %d reached. Increase it to"
                      " improve convergence." % max_iter, ConvergenceWarning)
    return W, H, n_iter
class NMF(BaseEstimator, TransformerMixin):
    """Non-Negative Matrix Factorization (NMF)
    Find two non-negative matrices (W, H) whose product approximates the non-
    negative matrix X. This factorization can be used for example for
    dimensionality reduction, source separation or topic extraction.
    The objective function is::
        0.5 * ||X - WH||_Fro^2
        + alpha * l1_ratio * ||vec(W)||_1
        + alpha * l1_ratio * ||vec(H)||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
        + 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
    Where::
        ||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
        ||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
    The objective function is minimized with an alternating minimization of W
    and H.
    Read more in the :ref:`User Guide <NMF>`.
    Parameters
    ----------
    n_components : int or None
        Number of components, if n_components is not set all features
        are kept.
    init :  'random' | 'nndsvd' |  'nndsvda' | 'nndsvdar' | 'custom'
        Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
        Valid options::
        'random': non-negative random matrices
        'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
            initialization (better for sparseness)
        'nndsvda': NNDSVD with zeros filled with the average of X
            (better when sparsity is not desired)
        'nndsvdar': NNDSVD with zeros filled with small random values
            (generally faster, less accurate alternative to NNDSVDa
            for when sparsity is not desired)
        'custom': use custom matrices W and H, given in 'fit' method.
    solver : 'pg' | 'cd'
        Numerical solver to use:
        'pg' is a (deprecated) Projected Gradient solver.
        'cd' is a Coordinate Descent solver.
    tol : double, default: 1e-4
        Tolerance value used in stopping conditions.
    max_iter : integer, default: 200
        Number of iterations to compute.
    random_state : integer seed, RandomState instance, or None (default)
        Random number generator seed control.
    alpha : double, default: 0.
        Constant that multiplies the regularization terms. Set it to zero to
        have no regularization.
    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an elementwise L2 penalty
        (aka Frobenius Norm).
        For l1_ratio = 1 it is an elementwise L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
    shuffle : boolean
        If True, the samples will be taken in shuffled order during
        coordinate descent.
    nls_max_iter : integer, default: 2000
        Number of iterations in NLS subproblem.
        Used only in the deprecated 'pg' solver.
    sparseness : 'data' | 'components' | None, default: None
        Where to enforce sparsity in the model.
        Used only in the deprecated 'pg' solver.
    beta : double, default: 1
        Degree of sparseness, if sparseness is not None. Larger values mean
        more sparseness. Used only in the deprecated 'pg' solver.
    eta : double, default: 0.1
        Degree of correctness to maintain, if sparsity is not None. Smaller
        values mean larger error. Used only in the deprecated 'pg' solver.
    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Non-negative components of the data.
    reconstruction_err_ : number
        Frobenius norm of the matrix difference between
        the training data and the reconstructed data from
        the fit produced by the model. ``|| X - WH ||_2``
    n_iter_ : int
        Actual number of iterations.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
    >>> from sklearn.decomposition import NMF
    >>> model = NMF(n_components=2, init='random', random_state=0)
    >>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
      n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
      solver='cd', sparseness=None, tol=0.0001, verbose=0)
    >>> model.components_
    array([[ 2.09783018,  0.30560234],
           [ 2.13443044,  2.13171694]])
    >>> model.reconstruction_err_ #doctest: +ELLIPSIS
    0.00115993...
    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/
    Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.
    """
    def __init__(self, n_components=None, init=None, solver='cd',
                 tol=1e-4, max_iter=200, random_state=None,
                 alpha=0., l1_ratio=0., verbose=0, shuffle=False,
                 nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
        # Per scikit-learn convention, __init__ only stores parameters.
        self.n_components = n_components
        self.init = init
        self.solver = solver
        self.tol = tol
        self.max_iter = max_iter
        self.random_state = random_state
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.verbose = verbose
        self.shuffle = shuffle
        # The remaining parameters only affect the deprecated 'pg' solver.
        if sparseness is not None:
            warnings.warn("Controlling regularization through the sparseness,"
                          " beta and eta arguments is only available"
                          " for 'pg' solver, which will be removed"
                          " in release 0.19. Use another solver with L1 or L2"
                          " regularization instead.", DeprecationWarning)
        self.nls_max_iter = nls_max_iter
        self.sparseness = sparseness
        self.beta = beta
        self.eta = eta
    def fit_transform(self, X, y=None, W=None, H=None):
        """Learn a NMF model for the data X and returns the transformed data.
        This is more efficient than calling fit followed by transform.
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape (n_samples, n_features)
            Data matrix to be decomposed
        W : array-like, shape (n_samples, n_components)
            If init='custom', it is used as initial guess for the solution.
        H : array-like, shape (n_components, n_features)
            If init='custom', it is used as initial guess for the solution.
        Attributes
        ----------
        components_ : array-like, shape (n_components, n_features)
            Factorization matrix, sometimes called 'dictionary'.
        n_iter_ : int
            Actual number of iterations for the transform.
        Returns
        -------
        W: array, shape (n_samples, n_components)
            Transformed data.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'))
        W, H, n_iter_ = non_negative_factorization(
            X=X, W=W, H=H, n_components=self.n_components,
            init=self.init, update_H=True, solver=self.solver,
            tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
            l1_ratio=self.l1_ratio, regularization='both',
            random_state=self.random_state, verbose=self.verbose,
            shuffle=self.shuffle,
            nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
            beta=self.beta, eta=self.eta)
        # Sparseness diagnostics are only computed for the 'pg' solver.
        if self.solver == 'pg':
            self.comp_sparseness_ = _sparseness(H.ravel())
            self.data_sparseness_ = _sparseness(W.ravel())
        self.reconstruction_err_ = _safe_compute_error(X, W, H)
        self.n_components_ = H.shape[0]
        self.components_ = H
        self.n_iter_ = n_iter_
        return W
    def fit(self, X, y=None, **params):
        """Learn a NMF model for the data X.
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape (n_samples, n_features)
            Data matrix to be decomposed
        Attributes
        ----------
        components_ : array-like, shape (n_components, n_features)
            Factorization matrix, sometimes called 'dictionary'.
        n_iter_ : int
            Actual number of iterations for the transform.
        Returns
        -------
        self
        """
        # Delegates to fit_transform; the returned W is discarded.
        self.fit_transform(X, **params)
        return self
    def transform(self, X):
        """Transform the data X according to the fitted NMF model
        Parameters
        ----------
        X: {array-like, sparse matrix}, shape (n_samples, n_features)
            Data matrix to be transformed by the model
        Attributes
        ----------
        n_iter_ : int
            Actual number of iterations for the transform.
        Returns
        -------
        W: array, shape (n_samples, n_components)
            Transformed data
        """
        check_is_fitted(self, 'n_components_')
        # Solve for W only, holding the fitted components_ (H) constant.
        W, _, n_iter_ = non_negative_factorization(
            X=X, W=None, H=self.components_, n_components=self.n_components_,
            init=self.init, update_H=False, solver=self.solver,
            tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
            l1_ratio=self.l1_ratio, regularization='both',
            random_state=self.random_state, verbose=self.verbose,
            shuffle=self.shuffle,
            nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
            beta=self.beta, eta=self.eta)
        self.n_iter_ = n_iter_
        return W
@deprecated("It will be removed in release 0.19. Use NMF instead."
            "'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
    """Deprecated NMF variant that forces the 'pg' (Projected Gradient)
    solver; a thin wrapper around NMF with solver='pg'."""
    def __init__(self, n_components=None, solver='pg', init=None,
                 tol=1e-4, max_iter=200, random_state=None,
                 alpha=0., l1_ratio=0., verbose=0,
                 nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
        # The `solver` argument is accepted for signature compatibility but
        # ignored: 'pg' is always passed to the parent class.
        super(ProjectedGradientNMF, self).__init__(
            n_components=n_components, init=init, solver='pg', tol=tol,
            max_iter=max_iter, random_state=random_state, alpha=alpha,
            l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
            sparseness=sparseness, beta=beta, eta=eta)
| bsd-3-clause |
ekansa/open-context-py | opencontext_py/apps/imports/kobotoolbox/dbupdate.py | 1 | 48497 | import copy
import csv
import uuid as GenUUID
import os, sys, shutil
import codecs
import numpy as np
import pandas as pd
from django.db import models
from django.db.models import Q
from django.conf import settings
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.assertions.sorting import AssertionSorting
from opencontext_py.apps.ocitems.subjects.models import Subject
from opencontext_py.apps.ocitems.subjects.generation import SubjectGeneration
from opencontext_py.apps.imports.records.models import ImportCell
from opencontext_py.apps.imports.records.create import ImportRecords
from opencontext_py.apps.imports.fields.models import ImportField
from opencontext_py.apps.imports.fields.create import ImportFields
from opencontext_py.apps.imports.fieldannotations.models import ImportFieldAnnotation
from opencontext_py.apps.imports.sources.models import ImportSource
from opencontext_py.apps.imports.sources.create import ImportRefineSource
from opencontext_py.apps.imports.sources.unimport import UnImport
from opencontext_py.apps.imports.sources.finalize import FinalizeImport
from opencontext_py.apps.imports.kobotoolbox.utilities import (
UUID_SOURCE_KOBOTOOLBOX,
UUID_SOURCE_OC_KOBO_ETL,
UUID_SOURCE_OC_LOOKUP,
LINK_RELATION_TYPE_COL,
list_excel_files,
read_excel_to_dataframes,
make_directory_files_df,
drop_empty_cols,
reorder_first_columns,
lookup_manifest_uuid,
)
from opencontext_py.apps.imports.kobotoolbox.attributes import (
REPROJECTED_LAT_COL,
REPROJECTED_LON_COL,
)
from opencontext_py.apps.imports.kobotoolbox.contexts import (
PATH_CONTEXT_COLS
)
from opencontext_py.apps.imports.kobotoolbox.kobofields import KoboFields
from opencontext_py.apps.imports.kobotoolbox.media import (
OPENCONTEXT_MEDIA_TYPES
)
# Dataframe column names used to flag per-row database load success.
DB_LOAD_RESULT_A_COL = 'OC_DB_LOAD_OK'
DB_LOAD_RESULT_B_COL = 'OC_DB_LOAD_B_OK'
# Default observation number used on newly created Assertion records.
DEFAULT_OBS_NUM = 1
# Order in which context (subjects) classes get imported, from most
# general to most specific, so that parent contexts exist in the
# database before their child contexts get loaded.
CLASS_CONTEXT_IMPORT_ORDER = [
    'oc-gen:cat-region',
    'oc-gen:cat-site',
    'oc-gen:cat-area',
    'oc-gen:cat-trench',
    'oc-gen:cat-exc-unit',
    'oc-gen:cat-locus',
    'oc-gen:cat-sample-col',
    'oc-gen:cat-sample',
    'oc-gen:cat-object',
    'oc-gen:cat-arch-element',
    'oc-gen:cat-pottery',
]
# Field configs for media file-type columns ('all-media' source), one
# per file type in OPENCONTEXT_MEDIA_TYPES. Each file-type column is the
# *subject* of a PRED_MEDIA_PART_OF relation to the source's primary-key
# field (see load_attribute_df_configs).
MEDIA_FILETYPE_ATTRIBUTE_CONFIGS = [
    {
        'source-column': file_type['col'],
        'sources': ['all-media',],
        'match_type': 'exact',
        'field_args': {
            'label': file_type['col'],
            'field_type': 'media',
            'field_value_cat': file_type['file_type']
        },
        'field_rel': {
            'predicate': ImportFieldAnnotation.PRED_MEDIA_PART_OF,
        },
    }
    for file_type in OPENCONTEXT_MEDIA_TYPES
]
# Field configs for reprojected latitude / longitude columns. Each geo
# column is the *subject* of a PRED_GEO_LOCATION relation to the
# source's primary-key field (see load_attribute_df_configs).
GEO_ATTRIBUTE_CONFIGS = [
    {
        'source-column': REPROJECTED_LAT_COL,
        'sources': ['catalog', 'locus', 'bulk-finds', 'small-finds', 'trench-book',],
        'match_type': 'exact',
        'field_args': {
            'label': REPROJECTED_LAT_COL,
            'field_type': 'lat',
            'field_value_cat': 'xsd:double',
        },
        'field_rel': {
            'predicate': ImportFieldAnnotation.PRED_GEO_LOCATION,
        },
    },
    {
        'source-column': REPROJECTED_LON_COL,
        'sources': ['catalog', 'locus', 'bulk-finds', 'small-finds', 'trench-book',],
        'match_type': 'exact',
        'field_args': {
            'label': REPROJECTED_LON_COL,
            'field_type': 'lon',
            'field_value_cat': 'xsd:double',
        },
        'field_rel': {
            'predicate': ImportFieldAnnotation.PRED_GEO_LOCATION,
        },
    },
]
DF_ATTRIBUTE_CONFIGS = MEDIA_FILETYPE_ATTRIBUTE_CONFIGS + GEO_ATTRIBUTE_CONFIGS + [
{
'source-column': 'label',
'sources': ['locus',],
'match_type': 'exact',
'field_args': {
'label': 'Locus Label',
'is_keycell': True,
'field_type': 'subjects',
'field_value_cat': 'oc-gen:cat-locus'
},
'subject_pk': True,
},
{
'source-column': 'label',
'sources': ['bulk-finds',],
'match_type': 'exact',
'field_args': {
'label': 'Bulk Find Label',
'is_keycell': True,
'field_type': 'subjects',
'field_value_cat': 'oc-gen:cat-sample-col'
},
'subject_pk': True,
},
{
'source-column': 'label',
'sources': ['small-finds',],
'match_type': 'exact',
'field_args': {
'label': 'Small Find Label',
'is_keycell': True,
'field_type': 'subjects',
'field_value_cat': 'oc-gen:cat-sample'
},
'subject_pk': True,
},
{
'source-column': 'label',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Object Label',
'is_keycell': True,
'field_type': 'subjects',
'field_value_cat': 'oc-gen:cat-object'
},
'subject_pk': True,
},
{
'source-column': 'Trench Book Title',
'sources': ['trench-book',],
'match_type': 'exact',
'field_args': {
'label': 'Trench Book Title',
'is_keycell': True,
'field_type': 'documents',
'field_value_cat': ''
},
'subject_pk': True,
},
{
'source-column': 'Entry Text',
'sources': ['trench-book',],
'match_type': 'exact',
'field_args': {
'label': 'Entry Text',
'field_type': 'documents',
'field_value_cat': 'oc-gen:document-text'
},
'field_rel': {
'predicate': 'oc-gen:document-text',
},
},
{
'source-column': 'File Title',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'File Title',
'is_keycell': True,
'field_type': 'media',
'field_value_cat': ''
},
'subject_pk': True,
},
{
'source-column': 'Data Entry Person',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Data Entry Person',
'field_type': 'persons',
'field_data_type': 'id',
'field_value_cat': ''
},
'field_rel': {
'predicate': 'oc-9', # Catalogued by
},
},
{
'source-column': 'Data Entry Person',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'File Creator',
'field_type': 'persons',
'field_data_type': 'id',
'field_value_cat': ''
},
'field_rel': {
'predicate': 'oc-14', # Photographed by
},
},
{
'source-column': 'File Creator',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'File Creator',
'field_type': 'persons',
'field_data_type': 'id',
'field_value_cat': ''
},
'field_rel': {
'predicate': 'oc-14', # Photographed by
},
},
{
'source-column': 'Trench Supervisor',
'sources': ['catalog', 'locus', 'bulk-finds', 'small-finds', 'trench-book',],
'match_type': 'startswith',
'field_args': {
'label': 'Trench Supervisor',
'field_type': 'persons',
'field_data_type': 'id',
'field_value_cat': ''
},
'field_rel': {
'predicate': 'oc-28', # Principal Author / Analyst
},
},
{
'source-column': 'Size (Notes)',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Size',
'f_uuid': 'B6D48580-AF49-409C-1172-E27CBA31F235',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Condition (Notes)',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Condition',
'f_uuid': '4909306F-3102-47A2-66A3-561C296147BB',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Description',
'sources': ['catalog', 'all-media',],
'match_type': 'exact',
'field_args': {
'label': 'Description',
'f_uuid': '7DBB5CB7-599F-42D5-61EE-1955CF898990',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Trench ID',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Trench',
'f_uuid': 'bd3aba0c-672a-4a1e-81ea-5408768ce407',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Date Cataloged',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Date Cataloged',
'f_uuid': '2d60965b-5151-446c-99b7-402e94e44c25',
'field_type': 'description',
'field_data_type': 'xsd:date',
},
},
{
'source-column': 'Year',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Year',
'f_uuid': '2C7FE888-C431-4FBD-39F4-38B7D969A811',
'field_type': 'description',
'field_data_type': 'xsd:integer',
},
},
{
'source-column': 'Record Type',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Record Type',
'f_uuid': '609ff344-7304-48e3-8db4-64b47dd12215',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Supplemental Find Identification Note',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Supplemental Find Identification Note',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Munsell Color',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Munsell Color',
'f_uuid': '9b99354c-55a2-45e0-9bfd-79bd7f2a801a',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Fabric Category',
'sources': ['catalog',],
'match_type': 'startswith',
'field_args': {
'label': 'Fabric Category',
'f_uuid': 'A70236CA-1599-42F5-4A12-ACEC8C423850',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Other Fabric Note',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Other Fabric Note',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Object General Type',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Object Type', # Note the difference from the source-column!
'f_uuid': '7DB79382-7432-42A4-FBC5-EF760691905A',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Object Type',
'sources': ['catalog', 'small-finds',],
'match_type': 'startswith',
'field_args': {
'label': 'Object Type',
'f_uuid': '7DB79382-7432-42A4-FBC5-EF760691905A',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Object Type, Title',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Catalog ID Note',
'f_uuid': '46c4ea6d-232f-45ec-97f8-3dd2762bcb56',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Decorative Techniques and Motifs/Decorative Technique',
'sources': ['catalog',],
'match_type': 'startswith',
'field_args': {
'label': 'Decorative Technique',
'f_uuid': 'F07C30BC-6C71-4C97-7893-D61FF6D0B59B',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Decorative Techniques and Motifs/Other Decorative Technique Note',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Other Decorative Technique Note',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Decorative Techniques and Motifs/Motif',
'sources': ['catalog',],
'match_type': 'startswith',
'field_args': {
'label': 'Motif',
'f_uuid': '9B260671-CBBD-490E-48B0-CDC48F5DF62D',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Decorative Techniques and Motifs/Other Motif Note',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Other Motif Note',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Vessel Form',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Vessel Form',
'f_uuid': '6A890B60-3811-44AE-A554-CC8245C4D946',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Vessel Part Present',
'sources': ['catalog',],
'match_type': 'exact',
'field_args': {
'label': 'Vessel Part Present',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Vessel Part Present',
'sources': ['catalog',],
'match_type': 'startswith',
'field_args': {
'label': 'Vessel Part Present',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Find Spot/Grid X',
'sources': ['catalog', 'small-find',],
'match_type': 'exact',
'field_args': {
'label': 'Grid (X)',
'f_uuid': 'b428ff04-670b-4912-a237-ad8ff9635f5a',
'field_type': 'description',
'field_data_type': 'xsd:double',
},
},
{
'source-column': 'Find Spot/Grid Y',
'sources': ['catalog', 'small-find',],
'match_type': 'exact',
'field_args': {
'label': 'Grid (Y)',
'f_uuid': '3e0c2eb3-266b-4fa4-ba59-c5c793a1e96d',
'field_type': 'description',
'field_data_type': 'xsd:double',
},
},
{
'source-column': 'Find Spot/Elevation',
'sources': ['catalog', 'small-find',],
'match_type': 'exact',
'field_args': {
'label': 'Elevation',
'f_uuid': 'aaa910a0-51c1-472e-9bd6-67e333e63bbd',
'field_type': 'description',
'field_data_type': 'xsd:double',
},
},
{
'source-column': 'Find Spot/Measurement Uncertainties/Grid X Uncertainty (+/- cm)',
'sources': ['catalog', 'small-find',],
'match_type': 'exact',
'field_args': {
'label': 'Grid X Uncertainty (+/- cm)',
'field_type': 'description',
'field_data_type': 'xsd:double',
},
},
{
'source-column': 'Find Spot/Measurement Uncertainties/Grid Y Uncertainty (+/- cm)',
'sources': ['catalog', 'small-find',],
'match_type': 'exact',
'field_args': {
'label': 'Grid Y Uncertainty (+/- cm)',
'field_type': 'description',
'field_data_type': 'xsd:double',
},
},
{
'source-column': 'Find Spot/Measurement Uncertainties/Elevation Uncertainty (+/- cm)',
'sources': ['catalog', 'small-find',],
'match_type': 'exact',
'field_args': {
'label': 'Elevation Uncertainty (+/- cm)',
'field_type': 'description',
'field_data_type': 'xsd:double',
},
},
{
'source-column': 'Find Spot/Measurement Uncertainties/Uncertainty Comment',
'sources': ['catalog', 'small-find',],
'match_type': 'exact',
'field_args': {
'label': 'Measurement Uncertainties Comment',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Find Type',
'sources': ['bulk-finds',],
'match_type': 'exact',
'field_args': {
'label': 'Find Type',
'f_uuid': '464b90e2-ce62-4570-bcea-58b7f9b5bb33',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Find Type (Other)',
'sources': ['bulk-finds',],
'match_type': 'exact',
'field_args': {
'label': 'Find Type (Other)',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Object Count',
'sources': ['bulk-finds',],
'match_type': 'exact',
'field_args': {
'label': 'Object Count',
'f_uuid': '84525f14-5e20-4765-a74e-303a5dbb4db8',
'field_type': 'description',
'field_data_type': 'xsd:double',
},
},
{
'source-column': 'Count Type',
'sources': ['bulk-finds',],
'match_type': 'exact',
'field_args': {
'label': 'Count Type',
'f_uuid': 'fa2e0286-de17-45e6-959f-9dab8c8cc5f5',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Count Type (Other)',
'sources': ['bulk-finds',],
'match_type': 'exact',
'field_args': {
'label': 'Count Type (Other)',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'General Description',
'sources': ['bulk-finds', 'locus'],
'match_type': 'exact',
'field_args': {
'label': 'Description',
'f_uuid': '7DBB5CB7-599F-42D5-61EE-1955CF898990',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Date Discovered',
'sources': ['bulk-finds', 'small-finds'],
'match_type': 'exact',
'field_args': {
'label': 'Date Discovered',
'f_uuid': '23ff0204-2b40-47b4-909a-66ec8d150528',
'field_type': 'description',
'field_data_type': 'xsd:date',
},
},
{
'source-column': 'Preliminary Phasing',
'sources': ['locus',],
'match_type': 'exact',
'field_args': {
'label': 'Preliminary Phasing',
'f_uuid': 'c2b40ac1-3b8d-4307-b217-c61732236d68',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Munsell Color',
'sources': ['locus',],
'match_type': 'exact',
'field_args': {
'label': 'Munsell Color',
'f_uuid': '9b99354c-55a2-45e0-9bfd-79bd7f2a801a',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Date Opened',
'sources': ['locus',],
'match_type': 'exact',
'field_args': {
'label': 'Date Opened',
'f_uuid': '0ea21cdb-ffab-4b68-9d47-78b180f08162',
'field_type': 'description',
'field_data_type': 'xsd:date',
},
},
{
'source-column': 'Date Closed',
'sources': ['locus',],
'match_type': 'exact',
'field_args': {
'label': 'Date Closed',
'f_uuid': '99684fbb-55d5-447a-8159-7d54fea80b50',
'field_type': 'description',
'field_data_type': 'xsd:date',
},
},
{
'source-column': 'Trench',
'sources': ['locus',],
'match_type': 'exact',
'field_args': {
'label': 'Trench',
'f_uuid': 'bd3aba0c-672a-4a1e-81ea-5408768ce407',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Field Season',
'sources': ['small-finds',],
'match_type': 'exact',
'field_args': {
'label': 'Year',
'f_uuid': '2C7FE888-C431-4FBD-39F4-38B7D969A811',
'field_type': 'description',
'field_data_type': 'xsd:integer',
},
},
{
'source-column': 'Entry Type',
'sources': ['trench-book',],
'match_type': 'exact',
'field_args': {
'label': 'Entry Type',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Document Type',
'sources': ['trench-book',],
'match_type': 'exact',
'field_args': {
'label': 'Document Type',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Date Documented',
'sources': ['trench-book',],
'match_type': 'exact',
'field_args': {
'label': 'Trench Book Entry Date',
'f_uuid': '8b812e4f-edc4-44f1-a88d-4ad358aaf9aa',
'field_type': 'description',
'field_data_type': 'xsd:date',
},
},
{
'source-column': 'Entry Year',
'sources': ['trench-book',],
'match_type': 'exact',
'field_args': {
'label': 'Entry Year',
'field_type': 'description',
'field_data_type': 'xsd:integer',
},
},
{
'source-column': 'Book Year',
'sources': ['trench-book',],
'match_type': 'exact',
'field_args': {
'label': 'Book Year',
'field_type': 'description',
'field_data_type': 'xsd:integer',
},
},
{
'source-column': 'Start Page',
'sources': ['trench-book',],
'match_type': 'exact',
'field_args': {
'label': 'Start Page',
'f_uuid': 'BECAD1AF-0245-44E0-CD2A-F2F7BD080443',
'field_type': 'description',
'field_data_type': 'xsd:integer',
},
},
{
'source-column': 'End Page',
'sources': ['trench-book',],
'match_type': 'exact',
'field_args': {
'label': 'End Page',
'f_uuid': '506924AA-B53D-41B5-9D02-9A7929EA6D6D',
'field_type': 'description',
'field_data_type': 'xsd:integer',
},
},
{
'source-column': 'Date Created',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'Date Created',
'f_uuid': 'e4671bb6-094d-4001-bb10-32685a168bc1',
'field_type': 'description',
'field_data_type': 'xsd:date',
},
},
{
'source-column': 'Direction or Orientation Notes/Direction Faced in Field',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'Direction Faced in Field',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Direction or Orientation Notes/Object Orientation Note',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'Object Orientation Note',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Direction or Orientation Notes/Object Orientation Note',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'Object Orientation Note',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Image Type',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'Image Type',
'f_uuid': 'B8556EAA-CF52-446B-39FA-AE4798C13A6B',
'field_type': 'description',
'field_data_type': 'id',
},
},
{
'source-column': 'Images/Note about Primary Image',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'Description',
'f_uuid': '7DBB5CB7-599F-42D5-61EE-1955CF898990',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Images/Supplemental Files/Note about Supplemental Image',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'Description',
'f_uuid': '7DBB5CB7-599F-42D5-61EE-1955CF898990',
'field_type': 'description',
'field_data_type': 'xsd:string',
},
},
{
'source-column': 'Media Type',
'sources': ['all-media',],
'match_type': 'exact',
'field_args': {
'label': 'Media Type',
'field_type': 'description',
'field_data_type': 'id',
},
},
]
LINK_REL_PRED_MAPPINGS = {
    # This describes mappings between link/relation types extracted and derived from the
    # source data from Kobo and predicate_uuid identifiers for use in the Open Context
    # Assertions table. This dictionary is keyed by a text string of link/relation types.
    # The tuple value for each key expresses the predicate_uuid for the
    # subject -> pred[0] -> object relation, as well as the inverse assertion for a
    # relationship between an object -> pred[1] -> subject.
    # A None in either position means no assertion gets made in that direction.
    'link': (Assertion.PREDICATES_LINK, Assertion.PREDICATES_LINK),
    'Is Part of': ('0BB889F9-54DD-4F70-5B63-F5D82425F0DB', 'BD384F1F-FB29-4A9D-7ACA-D8F6B4AF0AF9'),
    'Has Part': ('BD384F1F-FB29-4A9D-7ACA-D8F6B4AF0AF9', '0BB889F9-54DD-4F70-5B63-F5D82425F0DB'),
    'Previous Entry': ('fd94db54-c6f8-484b-9aa6-e0aacc9d132d', None, ),
    'Next Entry': ('50472e1c-2825-47cf-a69c-803b78f8891a', None, ),
    'Stratigraphy: Same/Same as Locus': ('254ea71a-ca2b-4568-bced-f82bf12cb2f9', '254ea71a-ca2b-4568-bced-f82bf12cb2f9'),
    'Same as': ('254ea71a-ca2b-4568-bced-f82bf12cb2f9', '254ea71a-ca2b-4568-bced-f82bf12cb2f9'),
    'Stratigraphy: Contemporary/Contemporary with Locus': ('eee95a2a-c3f8-4637-b67a-f4ff6ea4ee53', 'eee95a2a-c3f8-4637-b67a-f4ff6ea4ee53'),
    'Stratigraphy: Above/Above Locus': ('7895f4a8-d7e0-4219-bb47-9eef76c4acc0', '04a9d0b0-0ff8-412e-b134-23f705e666ca'),
    'Stratigraphy: Below/Below Locus': ('04a9d0b0-0ff8-412e-b134-23f705e666ca', '7895f4a8-d7e0-4219-bb47-9eef76c4acc0'),
    'Stratigraphy: Overlies/Overlies Locus': ('f2fd2edb-4505-447a-9403-13c18150d1d2', None),
    'Stratigraphic Relations: Cuts/Cuts Locus': ('0d5daed7-873d-4415-a0eb-3e7ddf7f25f7', None),
    'Objects join, refit together': ('5E41E490-0618-4D15-0826-38E3B4681C58', '5E41E490-0618-4D15-0826-38E3B4681C58'),
    'Additional ID': ('d58724ee-ecb9-4c2c-87a1-02f853edc2f2', '17012df0-ef2f-41a8-b8d6-ddf5b6687a7e'),
    'Associated in Context': ('3d4a7baa-8b52-4363-9a10-3f3a70cf919c', '3d4a7baa-8b52-4363-9a10-3f3a70cf919c'),
    'Has Related Trench Book Entry': ('f20e9e2e-246f-4421-b1dd-e31e8b58805c', Assertion.PREDICATES_LINK),
    'Related Open Locus': ('b0149b7c-88c8-4913-b6c8-81375239e71f', 'f20e9e2e-246f-4421-b1dd-e31e8b58805c'),
    'Related Small Find': (Assertion.PREDICATES_LINK, 'f20e9e2e-246f-4421-b1dd-e31e8b58805c'),
    'Initially documented as': ('d58724ee-ecb9-4c2c-87a1-02f853edc2f2', '17012df0-ef2f-41a8-b8d6-ddf5b6687a7e'),
    # Added for PC 2019
    'Other relation': (Assertion.PREDICATES_LINK, Assertion.PREDICATES_LINK),
    'Comparanda, based on form': ('46037eb4-c4b7-432b-bebb-500aff0e4fe6', '46037eb4-c4b7-432b-bebb-500aff0e4fe6'),
    'Comparanda, based on motif': ('1c5a1fca-0853-4612-9663-f908d9c081b2', '1c5a1fca-0853-4612-9663-f908d9c081b2'),
}
# ---------------------------------------------------------------------
# CONTEXT (item_type: subjects) RELATED FUNCTIONS
# ---------------------------------------------------------------------
def load_context_row(project_uuid, source_id, row):
    """Loads a single context record row into the database.

    :param str project_uuid: UUID of the project receiving the record.
    :param str source_id: Identifier for this import source.
    :param row: Mapping (dataframe row) with 'context_uuid',
        'parent_uuid', 'label', and 'class_uri' values.
    :return bool: False if the parent context is missing from the
        Manifest; True after the record and its containment
        assertion get saved.
    """
    parent_man_obj = Manifest.objects.filter(
        uuid=row['parent_uuid']
    ).first()
    if parent_man_obj is None:
        print('Cannot find parent_uuid {} for uuid {}'.format(
            row['parent_uuid'],
            row['context_uuid']
            )
        )
        # Skip the rest.
        return False
    # OK to continue
    man_obj = Manifest.objects.filter(
        uuid=row['context_uuid']
    ).first()
    if man_obj is None:
        man_obj = Manifest()
        # Set up the new item in the Manifest
        man_obj.uuid = row['context_uuid']
        man_obj.source_id = source_id
        man_obj.label = row['label']
        man_obj.project_uuid = project_uuid
        man_obj.item_type = 'subjects'
        man_obj.class_uri = row['class_uri']
        man_obj.save()
    # Just to be sure, make sure this item does not
    # have any existing parent (containment) relations.
    Assertion.objects.filter(
        predicate_uuid=Assertion.PREDICATES_CONTAINS,
        object_uuid=man_obj.uuid,
    ).delete()
    # Now add a context (parent contains child) relation to it.
    ass = Assertion()
    ass.uuid = parent_man_obj.uuid
    ass.subject_type = parent_man_obj.item_type
    ass.project_uuid = parent_man_obj.project_uuid
    ass.source_id = source_id
    ass.obs_node = '#contents-{}'.format(DEFAULT_OBS_NUM)
    ass.obs_num = DEFAULT_OBS_NUM
    ass.sort = 1
    ass.visibility = 1
    ass.predicate_uuid = Assertion.PREDICATES_CONTAINS
    ass.object_uuid = man_obj.uuid
    ass.object_type = man_obj.item_type
    ass.save()
    # Regenerate the stored context path string for the new item.
    sg = SubjectGeneration()
    sg.generate_save_context_path_from_uuid(man_obj.uuid)
    return True
def load_context_dataframe(
    project_uuid,
    source_id,
    context_df,
    class_uri=None,
    parent_uuids=None
):
    """Loads rows of the context dataframe into the database.

    :param str project_uuid: UUID of the project receiving the records.
    :param str source_id: Identifier for this import source.
    :param context_df: Dataframe of context records with 'context_uuid',
        'parent_uuid', 'class_uri', and 'label' columns.
    :param str class_uri: If given, load only rows of this class.
    :param list parent_uuids: If given (and class_uri is None), load
        only rows whose parent_uuid is in this list.
    :return: context_df, with per-row load results recorded in the
        DB_LOAD_RESULT_A_COL column.
    :raises ValueError: if neither class_uri nor parent_uuids is given.
    """
    # NOTE: the "!= True" comparisons deliberately include rows where
    # the load-result column is still NaN (not yet attempted).
    if class_uri is not None:
        p_index = (
            (context_df['class_uri']==class_uri)
            & (context_df[DB_LOAD_RESULT_A_COL] != True)
        )
    elif parent_uuids is not None:
        p_index = (
            (context_df['parent_uuid'].isin(parent_uuids))
            & (context_df[DB_LOAD_RESULT_A_COL] != True)
        )
    else:
        # Previously this fell through to a NameError on p_index below;
        # fail with an explicit, informative error instead.
        raise ValueError(
            'Give either a class_uri or a parent_uuids argument.'
        )
    # Sort on the context path columns — presumably so parent contexts
    # get processed before their children (TODO confirm with
    # PATH_CONTEXT_COLS semantics).
    context_df.sort_values(
        by=(PATH_CONTEXT_COLS + ['label']),
        na_position='first',
        inplace=True,
    )
    existing_man_objs = Manifest.objects.filter(
        uuid__in=context_df[p_index]['context_uuid'].unique().tolist()
    )
    existing_uuids = [m.uuid for m in existing_man_objs]
    for i, row in context_df[p_index].iterrows():
        uuid = row['context_uuid']
        if uuid in existing_uuids:
            # This uuid already exists, so do NOT import it.
            continue
        act_indx = (context_df['context_uuid'] == uuid)
        load_ok = load_context_row(project_uuid, source_id, row)
        context_df.loc[act_indx, DB_LOAD_RESULT_A_COL] = load_ok
    return context_df
def update_contexts_subjects(project_uuid, source_id, all_contexts_df):
    """Updates the database to have subjects (context) items.

    :param str project_uuid: UUID of the project receiving the records.
    :param str source_id: Identifier for this import source.
    :param all_contexts_df: Dataframe of all context records, including
        'parent_uuid', 'parent_uuid_source', 'class_uri', and 'label'
        columns.
    :return: A copy of the rows that have a parent_uuid, annotated with
        per-row load results in the DB_LOAD_RESULT_A_COL column.
    """
    # First, remove anything previously imported from this source.
    print('Unimport project: {}, source: {}'.format(project_uuid, source_id))
    unimp = UnImport(source_id, project_uuid)
    unimp.delete_ok = True
    unimp.delete_all()
    # Only rows with a known parent can be loaded.
    update_indx = (
        all_contexts_df['parent_uuid'].notnull()
    )
    new_contexts_df = all_contexts_df[update_indx].copy()
    # Process the configured classes first (general-to-specific), then
    # any classes present in the data but missing from the config.
    ordered_classes = CLASS_CONTEXT_IMPORT_ORDER.copy()
    ordered_classes += [
        c for c in new_contexts_df['class_uri'].unique().tolist()
        if c not in CLASS_CONTEXT_IMPORT_ORDER
    ]
    new_contexts_df.sort_values(
        by=(PATH_CONTEXT_COLS + ['label']),
        inplace=True,
        na_position='first'
    )
    # NaN marks rows whose load has not yet been attempted.
    new_contexts_df[DB_LOAD_RESULT_A_COL] = np.nan
    # First Create records for data with a parent in Open Context
    oc_par_index = (new_contexts_df['parent_uuid_source']==UUID_SOURCE_OC_LOOKUP)
    parent_uuids = new_contexts_df[oc_par_index]['parent_uuid'].unique().tolist()
    print('Loading contexts that are children of {} contexts in DB.'.format(
        len(parent_uuids)
        )
    )
    new_contexts_df = load_context_dataframe(
        project_uuid,
        source_id,
        new_contexts_df,
        parent_uuids=parent_uuids
    )
    for class_uri in ordered_classes:
        print('Loading contexts for class_uri: {}'.format(
            class_uri
            )
        )
        new_contexts_df = load_context_dataframe(
            project_uuid,
            source_id,
            new_contexts_df,
            class_uri=class_uri,
        )
    # Now sort the assertions just created for this source.
    asor = AssertionSorting()
    asor.re_rank_assertions_by_source(project_uuid, source_id)
    return new_contexts_df
# ---------------------------------------------------------------------
# ATTRIBUTES RELATED FUNCTIONS
# Attributes are loaded into the importer that normally gets data from
# an Open Refine source. The following functions load data from a
# dataframe with attributes, sets up the field types and relationships,
# assigns UUIDs where feasible, and imports the data into Open Context.
# The main expecation is that entities receiving attributes have been
# already created.
# ---------------------------------------------------------------------
def purge_data_from_source(project_uuid, source_id):
    """Deletes all database records previously made from an import source.

    Removes both the finalized Open Context records (via UnImport) and
    the intermediate importer-table records for the given source.
    """
    print('Unimport project: {}, source: {}'.format(project_uuid, source_id))
    unimp = UnImport(source_id, project_uuid)
    unimp.delete_ok = True
    unimp.delete_all()
    print('Unimport data from importer project: {}, source: {}'.format(project_uuid, source_id))
    # Clear every importer table of records from this source, in the
    # same order as before: cells, fields, field annotations, source.
    importer_models = (
        ImportCell,
        ImportField,
        ImportFieldAnnotation,
        ImportSource,
    )
    for imp_model in importer_models:
        imp_model.objects.filter(
            project_uuid=project_uuid,
            source_id=source_id,
        ).delete()
def load_attribute_df_configs(
    project_uuid,
    source_id,
    source_type,
    df,
    attribute_col_configs=DF_ATTRIBUTE_CONFIGS
):
    """Updates ImportFields with configurations for an attribute source.

    :param str project_uuid: UUID of the project being imported.
    :param str source_id: Identifier for this import source.
    :param str source_type: Source table key (e.g. 'catalog', 'locus')
        checked against each config's 'sources' list.
    :param df: Dataframe whose columns define the import fields (field
        numbers are 1-based column positions).
    :param list attribute_col_configs: Config dicts that map source
        columns to importer field settings and field relationships.
    :return None:
    """
    # NOTE: This has the assumption that a column has a "primary key",
    # of the main entity that gets description. Descriptions and other
    # relationships between columns by default use the "primary key"
    # column as subject of a relationship.
    default_field_args = {
        'field_type': 'ignore',
        'field_data_type': '',
    }
    kfs = KoboFields()
    cols = df.columns.tolist()
    pk_field_num = None
    field_rels = []
    for field_num, col in enumerate(cols, 1):
        if col in kfs.fields:
            # This is a kobo metadata field; relate it as metadata and
            # skip attribute configuration (configured in KoboFields).
            field_rels.append(
                {
                    'predicate': ImportFieldAnnotation.PRED_METADATA,
                    'subject_field_num': field_num,
                }
            )
            continue
        field_args = None
        field_rel = None
        for config in attribute_col_configs:
            # Default to ignore
            if (source_type in config['sources']
                and (col == config['source-column']
                    or (col.startswith(config['source-column'])
                    and config['match_type'] == 'startswith')
                )
            ):
                field_args = config['field_args'].copy()
                if config.get('subject_pk'):
                    pk_field_num = field_num
                if config.get('field_rel'):
                    # Copy the config dict. The original aliased the
                    # shared module-level dict, so field-num mutations
                    # leaked across calls, and a 'startswith' config
                    # matching several columns made every appended
                    # field_rel point at the same (last-written) dict.
                    field_rel = config['field_rel'].copy()
                    if field_rel.get('predicate') == ImportFieldAnnotation.PRED_MEDIA_PART_OF:
                        # A media file type column is the subject, the primary key field is obj.
                        field_rel['subject_field_num'] = field_num
                    elif field_rel.get('predicate') == ImportFieldAnnotation.PRED_GEO_LOCATION:
                        # A geospatial type column is the subject, the primary key field is obj.
                        field_rel['subject_field_num'] = field_num
                    else:
                        field_rel['object_field_num'] = field_num
                elif field_args.get('field_type') == 'description':
                    field_rel = {
                        'predicate': ImportFieldAnnotation.PRED_DESCRIBES,
                        'subject_field_num': field_num,
                    }
                # Don't break, in case a more specific config
                # is waiting.
        if field_args is None:
            # We didn't find any specific config, so we will ignore
            # the column.
            field_args = default_field_args.copy()
        else:
            print('Found {} config for {}'.format(source_type, col))
        # Update the column with configurations
        ImportField.objects.filter(
            project_uuid=project_uuid,
            source_id=source_id,
            ref_orig_name=col,
            field_num=field_num,
        ).update(**field_args)
        if field_rel is not None:
            field_rels.append(field_rel)
    # Now add configured relationship annotations between fields
    if pk_field_num is None or not field_rels:
        return None
    for field_rel in field_rels:
        # Use the specified subject field num, or default to the
        # source table's pk_field_num.
        subject_field_num = field_rel.get('subject_field_num', pk_field_num)
        object_field_num = field_rel.get('object_field_num', pk_field_num)
        # Just to be sure, delete prior links between these fields for
        # this source.
        ImportFieldAnnotation.objects.filter(
            source_id=source_id,
            project_uuid=project_uuid,
            field_num=subject_field_num,
            object_field_num=object_field_num,
        ).delete()
        # Now create the linkage
        imp_fa = ImportFieldAnnotation()
        imp_fa.source_id = source_id
        imp_fa.project_uuid = project_uuid
        imp_fa.field_num = subject_field_num
        imp_fa.predicate = field_rel['predicate']
        imp_fa.predicate_field_num = field_rel.get('predicate_field_num', 0)
        imp_fa.object_field_num = object_field_num
        imp_fa.save()
def load_attribute_df_into_importer(
    project_uuid,
    source_id,
    source_type,
    source_label,
    df,
    attribute_col_configs=DF_ATTRIBUTE_CONFIGS,
):
    """Loads a dataframe with attribute data into the importer.

    :param str project_uuid: UUID of the project being imported.
    :param str source_id: Identifier for this import source.
    :param str source_type: Source table key (e.g. 'catalog', 'locus')
        used to select applicable attribute configs.
    :param str source_label: Human-readable label for this source.
    :param df: Dataframe with the attribute data to load.
    :param list attribute_col_configs: Config dicts mapping source
        columns to importer field settings.
    :return None:
    """
    # Purge any data from a prior import attempt from this source.
    purge_data_from_source(project_uuid, source_id)
    # 1st, make the source object
    impsrc = ImportRefineSource()
    impsrc.source_id = source_id
    impsrc.project_uuid = project_uuid
    impsrc.create_new_dataframe_source(source_label, df)
    # 2nd, add the fields.
    impfields = ImportFields()
    impfields.source_id = source_id
    impfields.project_uuid = project_uuid
    impfields.save_dataframe_fields(source_id, df)
    # 3rd, add the record cells
    imprecs = ImportRecords()
    imprecs.source_id = source_id
    imprecs.project_uuid = project_uuid
    imprecs.save_dataframe_records(source_id, df)
    # Now pre-configure the fields
    load_attribute_df_configs(
        project_uuid,
        source_id,
        source_type,
        df,
        attribute_col_configs=attribute_col_configs,
    )
def load_attribute_data_into_oc(
    project_uuid,
    source_id,
):
    """Finalizes an importer source, loading its data into Open Context.

    Processes the source in batches until the importer reports that it
    is done, then prints a summary count of created assertions.

    :param str project_uuid: UUID of the project being imported.
    :param str source_id: Identifier for the import source to finalize.
    :return None:
    :raises RuntimeError: if source_id is not a usable import source.
    """
    fi = FinalizeImport(source_id)
    if not fi.project_uuid:
        raise RuntimeError('Problem with import source: {}'.format(source_id))
    fi.reset_state()
    import_done = False
    print('Start import into Open Context: {}'.format(source_id))
    while not import_done:
        # Re-instantiate each pass; FinalizeImport presumably tracks
        # batch progress in the database (TODO confirm).
        fi = FinalizeImport(source_id)
        output = fi.process_current_batch()
        for key, val in output.items():
            if key == 'details':
                continue
            print('{} -> {}: {}'.format(source_id, key, val))
        import_done = output['done']
    print('Number of {} assertions: {}'.format(
        source_id,
        Assertion.objects.filter(project_uuid=fi.project_uuid, source_id=source_id).count()
        )
    )
    print('Completed import into Open Context: {}'.format(source_id))
def purge_prior_link_rel_import(project_uuid, source_id):
    """Deletes a prior import of linking relation data from a source."""
    prior_link_asserts = Assertion.objects.filter(
        project_uuid=project_uuid,
        source_id=source_id
    )
    prior_link_asserts.delete()
def validate_pred_uuid(predicate_uuid):
    """Validates a predicate_uuid to make sure it is actually usable.

    A predicate_uuid is OK if it is None (meaning: skip import), if it
    identifies a 'predicates' Manifest item, or if it is already in use
    on some Assertion record.
    """
    if predicate_uuid is None:
        # We're OK with None, we just skip import.
        return True
    # Check the Manifest first, then fall back to prior Assertion use.
    found = Manifest.objects.filter(
        uuid=predicate_uuid,
        item_type='predicates'
    ).first()
    if found is None:
        found = Assertion.objects.filter(
            predicate_uuid=predicate_uuid
        ).first()
    # False means we could not validate use of this predicate uuid.
    return found is not None
def add_link_assertion(
    project_uuid,
    source_id,
    subj_man_obj,
    predicate_uuid,
    obj_man_obj,
    obs_num=1,
    sort=0,
):
    """Adds a linking-relation Assertion between two manifest items.

    :param str project_uuid: UUID of the project receiving the assertion.
    :param str source_id: Identifier for this import source.
    :param Manifest subj_man_obj: Manifest object for the subject.
    :param str predicate_uuid: UUID of the linking predicate.
    :param Manifest obj_man_obj: Manifest object for the object.
    :param int obs_num: Observation number for the assertion.
    :param int sort: Sort order for the assertion.
    :return: None if a required argument is missing, True on a
        successful save, False if the save raised an error.
    """
    if not subj_man_obj or not obj_man_obj or not predicate_uuid:
        # Skip out, we have some None objects, so no assertion
        return None
    ass = Assertion()
    ass.uuid = subj_man_obj.uuid
    ass.subject_type = subj_man_obj.item_type
    ass.project_uuid = project_uuid
    ass.source_id = source_id
    ass.obs_node = '#obs-' + str(obs_num)
    ass.obs_num = obs_num
    ass.sort = sort
    ass.visibility = 1
    ass.predicate_uuid = predicate_uuid
    ass.object_uuid = obj_man_obj.uuid
    ass.object_type = obj_man_obj.item_type
    try:
        ass.save()
        return True
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt and
        # SystemExit are not swallowed. Save failures (presumably
        # duplicate assertions — TODO confirm) report False.
        return False
def load_link_relations_df_into_oc(
    project_uuid,
    source_id,
    df,
    subject_uuid_col='subject_uuid',
    link_rel_col=LINK_RELATION_TYPE_COL,
    object_uuid_col='object_uuid',
    link_rel_pred_mappings=LINK_REL_PRED_MAPPINGS,
):
    """Loads a link relations dataframe into Open Context.

    Each dataframe row names a subject uuid, a link-relation type, and an
    object uuid. For every configured relation type, two assertions get
    attempted per row: subject -> pred_a -> object, and the inverse
    object -> pred_b -> subject. Per-row save results are written back to
    the dataframe in the DB_LOAD_RESULT_A_COL / DB_LOAD_RESULT_B_COL
    columns, and the (mutated) dataframe is returned.
    """
    # Result columns start as NaN; rows actually processed get True/False/None.
    df[DB_LOAD_RESULT_A_COL] = np.nan
    df[DB_LOAD_RESULT_B_COL] = np.nan
    # First, purge any prior import of this source so re-runs don't duplicate.
    print('Purge any prior import of {} to project_uuid: {}'.format(
        source_id,
        project_uuid
        )
    )
    purge_prior_link_rel_import(project_uuid, source_id)
    # Map every subject and object uuid present in the dataframe to its
    # Manifest object (uuids with no Manifest row are simply absent, and
    # rows that reference them get skipped by add_link_assertion).
    uuid_manifest_objs = {}
    all_uuids = df[df[subject_uuid_col].notnull()][subject_uuid_col].unique().tolist()
    all_uuids += df[df[object_uuid_col].notnull()][object_uuid_col].unique().tolist()
    for man_obj in Manifest.objects.filter(uuid__in=all_uuids):
        uuid_manifest_objs[man_obj.uuid] = man_obj
    # Now process the import.
    valid_predicte_uuids = {} # NOTE(review): appears unused below (and the name is a typo)
    link_types = df[df[link_rel_col].notnull()][link_rel_col].unique().tolist()
    for link_type in link_types:
        if not link_type in link_rel_pred_mappings:
            raise RuntimeError('Need to configure predicate(s) for {}: {}'.format(
                link_rel_col,
                link_type
                )
            )
        # Get a tuple of predicate_uuids from the link_rel_pred_mappings
        # configuration. How these get used for assertions (pred_b is for
        # inverse relations):
        #
        # (1) pred_a is used for: subject_uuid_col -> pred_a -> object_uuid_col
        # (2) pred_b is used for: object_uuid_col -> pred_b -> subject_uuid_col
        #
        pred_a, pred_b = link_rel_pred_mappings[link_type]
        if not validate_pred_uuid(pred_a) or not validate_pred_uuid(pred_b):
            raise RuntimeError('Unrecognized config uuids for {}:{} -> {}, is ok {}; {}, is ok {}'.format(
                link_rel_col,
                link_type,
                pred_a,
                validate_pred_uuid(pred_a),
                pred_b,
                validate_pred_uuid(pred_b)
                )
            )
        # Filter the dataframe for subj, links, and objects that are not blank.
        poss_ass_indx = (
            (df[link_rel_col] == link_type)
            & (df[subject_uuid_col].notnull())
            & (df[object_uuid_col].notnull())
        )
        if df[poss_ass_indx].empty:
            # Nothing loadable for this relation type.
            continue
        # Now proceed with loading.
        print('Load {} records for link_type: {}'.format(
            len(df[poss_ass_indx].index),
            link_type
            )
        )
        for i, row in df[poss_ass_indx].iterrows():
            s_man_obj = uuid_manifest_objs.get(row[subject_uuid_col])
            o_man_obj = uuid_manifest_objs.get(row[object_uuid_col])
            # Add the main link assertion, if applicable.
            ok_a = add_link_assertion(
                project_uuid,
                source_id,
                s_man_obj,
                pred_a,
                o_man_obj,
            )
            # Now add the inverse link relation, if applicable.
            # NOTE(review): only the inverse assertion receives a sort value
            # (i * 0.01) while the main assertion keeps sort=0 -- confirm
            # whether that asymmetry is intentional.
            ok_b = add_link_assertion(
                project_uuid,
                source_id,
                o_man_obj,
                pred_b,
                s_man_obj,
                sort= (i * 0.01)
            )
            # Record the save outcome on every dataframe row matching this
            # (link_type, subject, object) triple.
            up_indx = (
                (df[link_rel_col] == link_type)
                & (df[subject_uuid_col] ==row [subject_uuid_col])
                & (df[object_uuid_col] == row[object_uuid_col])
            )
            df.loc[up_indx, DB_LOAD_RESULT_A_COL] = ok_a
            df.loc[up_indx, DB_LOAD_RESULT_B_COL] = ok_b
    # Now sort the assertions for the items just impacted.
    asor = AssertionSorting()
    asor.re_rank_assertions_by_source(project_uuid, source_id)
return df | gpl-3.0 |
Mecanon/morphing_wing | dynamic_model/results/flexinol_SMA/config_A/max_deflection/power_usage_2.py | 3 | 11206 | # -*- coding: utf-8 -*-
"""
Analyze the heating, current and power usage of teh actuation
Created on Thu Apr 28 09:56:23 2016
@author: Pedro Leal
"""
import math
import numpy as np
import pickle
import matplotlib.pyplot as plt
#Time step
delta_t = 0.005
sigma_o = 100e6
r = 0.000381/2.
d = 2*r
alpha = 0. #set to zero on purpose
c = 837.36
rho = 6450.
#Transformation strain properties
H_max = 0.0550
H_min = 0.0387
sigma_crit = 0
k = 4.6849e-09
rho_E_M = 0.8e-6 #Dynalloy
rho_E_A = 1.0e-6 #Dynalloy
E_A = 3.7427e+10
E_M = 8.8888e+10
C_A = 7.9498e+06
C_M = 7.1986e+06
M_s = 363.5013
M_f = 297.9735
A_s = 324.6427
A_f = 385.0014
n1 = 0.1752
n2 = 0.1789
n3 = 0.1497
n4 = 0.2935
sigma_cal = 200E6
#Load data
Data = pickle.load(open( "data.p", "rb" ))
sigma = Data['sigma']
T = Data['T']
xi = Data['xi']
eps_s = Data['eps_s']
L_s = Data['L_s']
T_o = T[0]
n = len(eps_s)
#==============================================================================
# Calculate output work
#==============================================================================
# W_list holds incremental mechanical work (average stress over the strain
# increment, scaled by wire volume); deltaW_list holds a power-like rate
# eps*d(sigma)/dt + sigma*d(eps)/dt, also scaled by wire volume.
W_list = []
deltaW_list = []
Total_work = 0
total_work_list = []
for i in range(1, n):
    delta_eps = abs(eps_s[i] - eps_s[i-1])
    delta_sigma = abs(sigma[i] - sigma[i-1])
    av_eps = (eps_s[i] + eps_s[i-1])/2.
    av_sigma = (sigma[i] + sigma[i-1])/2.
    # NOTE(review): av_eps and av_sigma are computed but never used below.
    dW = math.pi*r**2*L_s[0]*0.5*(sigma[i]+sigma[i-1])*delta_eps
    deltaW = math.pi*r**2*L_s[0]*(eps_s[i]*delta_sigma/delta_t + sigma[i]*delta_eps/delta_t)
    W_list.append(dW)
    deltaW_list.append(deltaW)
    Total_work += deltaW
    total_work_list.append(Total_work)
# NOTE(review): this assignment discards the Total_work accumulated in the
# loop above (sum of deltaW) and replaces it with sum(W_list)*delta_t --
# confirm which definition of total work is intended.
Total_work = sum(W_list)*delta_t
Total_delta = sum(deltaW_list)*delta_t
# Quick-look plots of the strain and stress histories.
plt.figure()
plt.plot(eps_s)
plt.figure()
plt.plot(sigma)
#==============================================================================
# Calculate input heat for different h
#==============================================================================
# Sweep the convection coefficient h and, for each value, compute the
# required electrical current I and heating power P over the actuation
# history from the SMA heat balance (sensible heat + transformation latent
# heat + convective losses), then time-integrate the power.
h_list = np.linspace(0,100., 6)
P_h_list = []
total_power_list = []
for j in range(len(h_list)):
    h = h_list[j]
    P_list = []
    I_list = []
    a = 0  # running sum of the sensible-heat term (diagnostic only)
    b = 0  # running sum of the transformation term (diagnostic only)
    for i in range(1, n):
        delta_sigma = sigma[i] - sigma[i-1]
        delta_T = T[i] - T[i-1]
        delta_xi = xi[i] - xi[i-1]
        # Resistivity interpolated by the martensite fraction xi.
        rho_E = rho_E_M*xi[i] + (1-xi[i])*rho_E_A
        # Stress derivative of the transformation strain H(sigma).
        if abs(sigma[i]) <= sigma_crit:
            dH_cur = 0
        else:
            dH_cur = k*(H_max-H_min)*math.exp(-k*(abs(sigma[i])-sigma_crit))*np.sign(sigma[i])
        H_cur = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_o) - sigma_crit)))
        H_cur_cal = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_cal) - sigma_crit)))
        # Model parameters derived from the calibration stress.
        rho_delta_s0 = (-2*(C_M*C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/(C_M + C_A)
        a1 = rho_delta_s0*(M_f - M_s)
        a2 = rho_delta_s0*(A_s - A_f)
        a3 = -a1/4 * (1 + 1/(n1+1) - 1/(n2+1)) + a2/4 * (1+1/(n3+1) - 1/(n4+1))
        Y_0_t = rho_delta_s0/2*(M_s - A_f) - a3
        D = ((C_M - C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/((C_M + C_A)*(H_cur_cal+ sigma_cal*dH_cur))
        pi_t = Y_0_t + D*abs(sigma[i])*H_cur
        # Current and power for a constant convection coefficient h.
        I = r*math.pi*math.sqrt((r/rho_E)*((r/delta_t)*((T[i]*alpha*delta_sigma + \
            rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) ) + \
            2.*h*(T[i] - T_o))))
        P = math.pi*r**2*L_s[i]*((T[i]*alpha*delta_sigma + \
            rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) )/delta_t + \
            2.*(h/r)*(T[i] - T_o))
        print P, T[i]*alpha*delta_sigma, rho*c*delta_T, delta_xi*(-pi_t + rho_delta_s0*T[i])
        a += rho*c*delta_T
        b += delta_xi*(-pi_t + rho_delta_s0*T[i])
        I_list.append(I)
        P_list.append(P)
    P_h_list.append(P_list)
    # Rectangle-rule time integration of the power history.
    Total_power = 0
    for i in range(len(P_list)-1):
        Total_power += delta_t*P_list[i]
    total_power_list.append(Total_power)
t = np.linspace(0,(n-2)*delta_t, n-1)
# Power histories for each h, compared with the output work rate.
plt.figure()
for i in range(len(h_list)):
    color=((1.-float(i)/(len(h_list)-1), float(i)/(len(h_list)-1),0, 1.))
    plt.plot(t, P_h_list[i], label = 'h = ' + str(h_list[i]), color = color)
plt.plot(t, deltaW_list, 'b', label = '$\dot{W}$')
plt.xlabel('Time (s)')
plt.ylabel('Power (W)')
plt.grid()
plt.legend(loc= 'upper left')
# Total energy consumption and efficiency versus h.
plt.figure()
plt.plot(h_list, total_power_list)
plt.xlabel('Convection coefficient')
plt.ylabel('Total power consumption (J)')
plt.grid()
plt.figure()
plt.plot(h_list, 100.*Total_delta/np.array(total_power_list))
plt.xlabel('Convection coefficient $h$ ')
plt.ylabel('Efficiency (%)')
plt.grid()
print 'Total adiabatic power is %f Joules' % total_power_list[0]
print 'Total work is %f Joules' % Total_delta
print 'Adiabatic efficiency is %f ' % (Total_delta/total_power_list[0])
#==============================================================================
# Calculate input heat for different delta_t
#==============================================================================
# Repeat the power integration over a range of time steps, for every h in
# h_list, to examine how efficiency depends on actuation speed.
delta_t_list = np.linspace(0.001,0.05, 50)
h_dt_power_list = []
for i in range(len(h_list)):
    h = h_list[i]
    total_power_list = []
    for j in range(len(delta_t_list)):
        delta_t = delta_t_list[j]
        P_list = []
        I_list = []
        a = 0
        b = 0
        # NOTE(review): the loop below reuses 'i' from the outer h_list
        # loop; harmless because h is bound beforehand and the outer 'for'
        # rebinds i each pass, but renaming would be clearer.
        for i in range(1, n):
            delta_sigma = sigma[i] - sigma[i-1]
            delta_T = T[i] - T[i-1]
            delta_xi = xi[i] - xi[i-1]
            rho_E = rho_E_M*xi[i] + (1-xi[i])*rho_E_A
            if abs(sigma[i]) <= sigma_crit:
                dH_cur = 0
            else:
                dH_cur = k*(H_max-H_min)*math.exp(-k*(abs(sigma[i])-sigma_crit))*np.sign(sigma[i])
            H_cur = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_o) - sigma_crit)))
            H_cur_cal = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_cal) - sigma_crit)))
            rho_delta_s0 = (-2*(C_M*C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/(C_M + C_A)
            a1 = rho_delta_s0*(M_f - M_s)
            a2 = rho_delta_s0*(A_s - A_f)
            a3 = -a1/4 * (1 + 1/(n1+1) - 1/(n2+1)) + a2/4 * (1+1/(n3+1) - 1/(n4+1))
            Y_0_t = rho_delta_s0/2*(M_s - A_f) - a3
            D = ((C_M - C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/((C_M + C_A)*(H_cur_cal+ sigma_cal*dH_cur))
            pi_t = Y_0_t + D*abs(sigma[i])*H_cur
            # Current and power for a constant convection coefficient h.
            I = r*math.pi*math.sqrt((r/rho_E)*((r/delta_t)*((T[i]*alpha*delta_sigma + \
                rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) ) + \
                2.*h*(T[i] - T_o))))
            P = math.pi*r**2*L_s[i]*((T[i]*alpha*delta_sigma + \
                rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) )/delta_t + \
                2.*(h/r)*(T[i] - T_o))
            a += rho*c*delta_T
            b += delta_xi*(-pi_t + rho_delta_s0*T[i])
            I_list.append(I)
            P_list.append(P)
        Total_power = 0
        for i in range(len(P_list)-1):
            Total_power += delta_t*P_list[i]
        total_power_list.append(Total_power)
    h_dt_power_list.append(total_power_list)
t = np.linspace(0,(n-2)*delta_t, n-1)
# Efficiency versus time step, one curve per convection coefficient.
plt.figure()
for i in range(len(h_list)):
    color=((1.-float(i)/(len(h_list)-1), float(i)/(len(h_list)-1),0, 1.))
    plt.plot(delta_t_list, 100.*Total_delta/np.array(h_dt_power_list[i]),
             color = color, label= 'h = %.f' % h_list[i])
plt.xlabel('$\Delta t$ ')
plt.ylabel('Efficiency (%)')
plt.grid()
plt.legend(loc='best')
#==============================================================================
# Calculate heat input for different T_o
#==============================================================================
# Sweep the ambient temperature T_o at fixed delta_t and h.
delta_t = 0.05
h = 10. # NOTE(review): original comment said "invented (adiabatic)", but
        # h = 10 is nonzero convection -- the comment may be stale.
T_list = np.linspace(200,300., 5)
P_T_list = []
total_power_list = []
for j in range(len(T_list)):
    T_o = T_list[j]
    P_list = []
    I_list = []
    for i in range(1, n):
        delta_sigma = sigma[i] - sigma[i-1]
        delta_T = T[i] - T[i-1]
        delta_xi = xi[i] - xi[i-1]
        rho_E = rho_E_M*xi[i] + (1-xi[i])*rho_E_A
        if abs(sigma[i]) <= sigma_crit:
            dH_cur = 0
        else:
            dH_cur = k*(H_max-H_min)*math.exp(-k*(abs(sigma[i])-sigma_crit))*np.sign(sigma[i])
        H_cur = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_o) - sigma_crit)))
        H_cur_cal = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_cal) - sigma_crit)))
        rho_delta_s0 = (-2*(C_M*C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/(C_M + C_A)
        a1 = rho_delta_s0*(M_f - M_s)
        a2 = rho_delta_s0*(A_s - A_f)
        a3 = -a1/4 * (1 + 1/(n1+1) - 1/(n2+1)) + a2/4 * (1+1/(n3+1) - 1/(n4+1))
        Y_0_t = rho_delta_s0/2*(M_s - A_f) - a3
        D = ((C_M - C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/((C_M + C_A)*(H_cur_cal+ sigma_cal*dH_cur))
        pi_t = Y_0_t + D*abs(sigma[i])*H_cur
        #constant h
#        I = r*math.pi*math.sqrt((r/rho_E)*((r/delta_t)*((T[i]*alpha*delta_sigma + \
#            rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) ) + \
#            2.*h*(T[i] - T_o))))
#
        P = math.pi*r**2*L_s[i]*((T[i]*alpha*delta_sigma + \
            rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) )/delta_t + \
            2.*(h/r)*(T[i] - T_o))
        # NOTE(review): the I computation above is commented out, so this
        # appends the stale I left over from the previous (delta_t) sweep;
        # I_list is meaningless in this section -- recompute or remove it.
        I_list.append(I)
        P_list.append(P)
    P_T_list.append(P_list)
    # Trapezoidal time integration of the power history.
    Total_power = 0
    for i in range(len(P_list)-1):
        Total_power += delta_t*(P_list[i] + P_list[i+1])/2.
    total_power_list.append(Total_power)
# Power histories for each ambient temperature.
plt.figure()
for i in range(len(T_list)):
    color = ((1.-float(i)/(len(T_list)-1), float(i)/(len(T_list)-1),0, 1.))
    plt.plot(t, P_T_list[i], 'b', label = '$T_o$ = ' + str(T_list[i]), color = color)
plt.xlabel('Time (s)')
plt.ylabel('Power (W)')
plt.grid()
plt.legend(loc= 'upper left')
# Total consumption versus ambient temperature.
plt.figure()
plt.plot(T_list, total_power_list)
plt.xlabel('Temperature (K)')
plt.ylabel('Total power consumption (J)')
plt.grid() | mit |
leggitta/mne-python | examples/connectivity/plot_mne_inverse_connectivity_spectrum.py | 18 | 3465 | """
==============================================================
Compute full spectrum source space connectivity between labels
==============================================================
The connectivity is computed between 4 labels across the spectrum
between 5 and 40 Hz.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.io import Raw
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
print(__doc__)
# Paths into the MNE sample dataset: inverse operator, filtered raw MEG
# recording, and the corresponding events file.
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = Raw(fname_raw)
events = mne.read_events(fname_event)
# Add a bad channel so it is excluded from picking below.
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels (plus EOG for artifact rejection), excluding bads.
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
                       exclude='bads')
# Define epochs for left-auditory condition, with amplitude rejection.
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
                                                    eog=150e-6))
# Compute inverse solution for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list (lower peak memory).
snr = 1.0  # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
                            pick_ori="normal", return_generator=True)
# Read some labels (auditory and visual cortex, both hemispheres).
names = ['Aud-lh', 'Aud-rh', 'Vis-lh', 'Vis-rh']
labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % name)
          for name in names]
# Average the source estimates within each label using sign-flips to reduce
# signal cancellations; also here we return a generator.
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
                                         return_generator=True)
# Debiased squared WPLI connectivity between labels across 5-40 Hz.
fmin, fmax = 5., 40.
sfreq = raw.info['sfreq']  # the sampling frequency
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
    label_ts, method='wpli2_debiased', mode='multitaper', sfreq=sfreq,
    fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=2)
# Plot the lower-triangular connectivity spectra mirrored into a full grid;
# diagonal panels are switched off.
n_rows, n_cols = con.shape[:2]
fig, axes = plt.subplots(n_rows, n_cols, sharex=True, sharey=True)
plt.suptitle('Between labels connectivity')
for i in range(n_rows):
    for j in range(i + 1):
        if i == j:
            axes[i, j].set_axis_off()
            continue
        axes[i, j].plot(freqs, con[i, j, :])
        axes[j, i].plot(freqs, con[i, j, :])
        if j == 0:
            axes[i, j].set_ylabel(names[i])
            axes[0, i].set_title(names[i])
        if i == (n_rows - 1):
            axes[i, j].set_xlabel(names[j])
        axes[i, j].set_xlim([fmin, fmax])
        axes[j, i].set_xlim([fmin, fmax])
        # Show band limits as vertical markers.
        for f in [8, 12, 18, 35]:
            axes[i, j].axvline(f, color='k')
            axes[j, i].axvline(f, color='k')
plt.show()
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
    """Smoke-test make_classification shapes and weighted class counts."""
    X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
                               n_redundant=1, n_repeated=1, n_classes=3,
                               n_clusters_per_class=1, hypercube=False,
                               shift=None, scale=None, weights=[0.1, 0.25],
                               random_state=0)
    assert_equal(X.shape, (100, 20), "X shape mismatch")
    assert_equal(y.shape, (100,), "y shape mismatch")
    assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
    # weights=[0.1, 0.25] -> the third class takes the remaining 65%.
    assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
    assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
    assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
    """Test the construction of informative features in make_classification

    Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
    fully-specified `weights`.
    """
    # Create very separate clusters; check that vertices are unique and
    # correspond to classes
    class_sep = 1e6
    make = partial(make_classification, class_sep=class_sep, n_redundant=0,
                   n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
    # Each tuple is (n_informative, weights, n_clusters_per_class).
    for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
                                                         (2, [1/3] * 3, 1),
                                                         (2, [1/4] * 4, 1),
                                                         (2, [1/2] * 2, 2),
                                                         (2, [3/4, 1/4], 2),
                                                         (10, [1/3] * 3, 10)
                                                         ]:
        n_classes = len(weights)
        n_clusters = n_classes * n_clusters_per_class
        n_samples = n_clusters * 50
        for hypercube in (False, True):
            X, y = make(n_samples=n_samples, n_classes=n_classes,
                        weights=weights, n_features=n_informative,
                        n_informative=n_informative,
                        n_clusters_per_class=n_clusters_per_class,
                        hypercube=hypercube, random_state=0)
            assert_equal(X.shape, (n_samples, n_informative))
            assert_equal(y.shape, (n_samples,))
            # Cluster by sign, viewed as strings to allow uniquing
            signs = np.sign(X)
            signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
            unique_signs, cluster_index = np.unique(signs,
                                                    return_inverse=True)
            assert_equal(len(unique_signs), n_clusters,
                         "Wrong number of clusters, or not in distinct "
                         "quadrants")
            # Each class must own exactly n_clusters_per_class quadrants.
            clusters_by_class = defaultdict(set)
            for cluster, cls in zip(cluster_index, y):
                clusters_by_class[cls].add(cluster)
            for clusters in clusters_by_class.values():
                assert_equal(len(clusters), n_clusters_per_class,
                             "Wrong number of clusters per class")
            assert_equal(len(clusters_by_class), n_classes,
                         "Wrong number of classes")
            # Empirical class frequencies divided (floor) by weights ~ 1.
            assert_array_almost_equal(np.bincount(y) / len(y) // weights,
                                      [1] * n_classes,
                                      err_msg="Wrong number of samples "
                                              "per class")
            # Ensure on vertices of hypercube
            for cluster in range(len(unique_signs)):
                centroid = X[cluster_index == cluster].mean(axis=0)
                if hypercube:
                    assert_array_almost_equal(np.abs(centroid),
                                              [class_sep] * n_informative,
                                              decimal=0,
                                              err_msg="Clusters are not "
                                                      "centered on hypercube "
                                                      "vertices")
                else:
                    assert_raises(AssertionError,
                                  assert_array_almost_equal,
                                  np.abs(centroid),
                                  [class_sep] * n_informative,
                                  decimal=0,
                                  err_msg="Clusters should not be cenetered "
                                          "on hypercube vertices")
    # Invalid configurations must raise: too few features / too many clusters.
    assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
                  n_clusters_per_class=1)
    assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
                  n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
    """Multilabel data returned as label sequences (lists of class ids)."""
    for allow_unlabeled, min_length in zip((True, False), (0, 1)):
        X, Y = make_multilabel_classification(n_samples=100, n_features=20,
                                              n_classes=3, random_state=0,
                                              return_indicator=False,
                                              allow_unlabeled=allow_unlabeled)
        assert_equal(X.shape, (100, 20), "X shape mismatch")
        if not allow_unlabeled:
            assert_equal(max([max(y) for y in Y]), 2)
        assert_equal(min([len(y) for y in Y]), min_length)
        assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
    """Multilabel data as a dense indicator matrix, plus distributions."""
    for allow_unlabeled, min_length in zip((True, False), (0, 1)):
        X, Y = make_multilabel_classification(n_samples=25, n_features=20,
                                              n_classes=3, random_state=0,
                                              allow_unlabeled=allow_unlabeled)
        assert_equal(X.shape, (25, 20), "X shape mismatch")
        assert_equal(Y.shape, (25, 3), "Y shape mismatch")
        assert_true(np.all(np.sum(Y, axis=0) > min_length))
    # Also test return_distributions and return_indicator with True
    X2, Y2, p_c, p_w_c = make_multilabel_classification(
        n_samples=25, n_features=20, n_classes=3, random_state=0,
        allow_unlabeled=allow_unlabeled, return_distributions=True)
    # Same random_state must reproduce the same X, Y.
    assert_array_equal(X, X2)
    assert_array_equal(Y, Y2)
    assert_equal(p_c.shape, (3,))
    assert_almost_equal(p_c.sum(), 1)
    assert_equal(p_w_c.shape, (20, 3))
    assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
    """Multilabel data as a sparse indicator matrix."""
    for allow_unlabeled, min_length in zip((True, False), (0, 1)):
        X, Y = make_multilabel_classification(n_samples=25, n_features=20,
                                              n_classes=3, random_state=0,
                                              return_indicator='sparse',
                                              allow_unlabeled=allow_unlabeled)
        assert_equal(X.shape, (25, 20), "X shape mismatch")
        assert_equal(Y.shape, (25, 3), "Y shape mismatch")
        assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
    """Hastie 10.2 data: 100x10 features with a binary target."""
    data, target = make_hastie_10_2(n_samples=100, random_state=0)
    assert_equal(data.shape, (100, 10), "X shape mismatch")
    assert_equal(target.shape, (100,), "y shape mismatch")
    assert_equal(np.unique(target).shape, (2,), "Unexpected number of classes")
def test_make_regression():
    """Shapes, sparsity of coef, and noise level of make_regression."""
    X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
                              effective_rank=5, coef=True, bias=0.0,
                              noise=1.0, random_state=0)
    assert_equal(X.shape, (100, 10), "X shape mismatch")
    assert_equal(y.shape, (100,), "y shape mismatch")
    assert_equal(c.shape, (10,), "coef shape mismatch")
    assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
    # Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
    assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
    # Test with small number of features.
    X, y = make_regression(n_samples=100, n_features=1)  # n_informative=3
    assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
    """Same checks with a 3-column multi-target y and coef matrix."""
    X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
                              n_targets=3, coef=True, noise=1., random_state=0)
    assert_equal(X.shape, (100, 10), "X shape mismatch")
    assert_equal(y.shape, (100, 3), "y shape mismatch")
    assert_equal(c.shape, (10, 3), "coef shape mismatch")
    assert_array_equal(sum(c != 0.0), 3,
                       "Unexpected number of informative features")
    # Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
    assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
    """Blob centers and per-cluster standard deviations are honored."""
    cluster_stds = np.array([0.05, 0.2, 0.4])
    cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
                      centers=cluster_centers, cluster_std=cluster_stds)
    assert_equal(X.shape, (50, 2), "X shape mismatch")
    assert_equal(y.shape, (50,), "y shape mismatch")
    assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
    for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
        assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
    """Noise-free Friedman #1 must match its closed-form target."""
    X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
                          random_state=0)
    assert_equal(X.shape, (5, 10), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")
    assert_array_almost_equal(y,
                              10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                              + 20 * (X[:, 2] - 0.5) ** 2
                              + 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
    """Noise-free Friedman #2 must match its closed-form target."""
    X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
    assert_equal(X.shape, (5, 4), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")
    assert_array_almost_equal(y,
                              (X[:, 0] ** 2
                               + (X[:, 1] * X[:, 2] - 1
                                  / (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
    """Noise-free Friedman #3 must match its closed-form target."""
    X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
    assert_equal(X.shape, (5, 4), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")
    assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
                                            - 1 / (X[:, 1] * X[:, 3]))
                                           / X[:, 0]))
def test_make_low_rank_matrix():
    """The generated matrix has (approximately) the requested rank."""
    X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
                             tail_strength=0.01, random_state=0)
    assert_equal(X.shape, (50, 25), "X shape mismatch")
    from numpy.linalg import svd
    u, s, v = svd(X)
    assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
    """Y = D X with unit-norm atoms and exactly 3 nonzeros per column."""
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert_equal(Y.shape, (10, 5), "Y shape mismatch")
    assert_equal(D.shape, (10, 8), "D shape mismatch")
    assert_equal(X.shape, (8, 5), "X shape mismatch")
    for col in X.T:
        assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
    assert_array_almost_equal(np.dot(D, X), Y)
    assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
                              np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
    """Shape smoke-test for make_sparse_uncorrelated."""
    X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
    assert_equal(X.shape, (5, 10), "X shape mismatch")
    assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
    """Generated matrix is symmetric with strictly positive eigenvalues."""
    X = make_spd_matrix(n_dim=5, random_state=0)
    assert_equal(X.shape, (5, 5), "X shape mismatch")
    assert_array_almost_equal(X, X.T)
    from numpy.linalg import eig
    eigenvalues, _ = eig(X)
    assert_array_equal(eigenvalues > 0, np.array([True] * 5),
                       "X is not positive-definite")
def test_make_swiss_roll():
    """Noise-free swiss roll satisfies x0 = t*cos(t), x2 = t*sin(t)."""
    X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
    assert_equal(X.shape, (5, 3), "X shape mismatch")
    assert_equal(t.shape, (5,), "t shape mismatch")
    assert_array_almost_equal(X[:, 0], t * np.cos(t))
    assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
    """Noise-free S curve satisfies x0 = sin(t), x2 = sign(t)*(cos(t)-1)."""
    X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
    assert_equal(X.shape, (5, 3), "X shape mismatch")
    assert_equal(t.shape, (5,), "t shape mismatch")
    assert_array_almost_equal(X[:, 0], np.sin(t))
    assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
    """Shapes, finiteness, and reproducibility of make_biclusters."""
    X, rows, cols = make_biclusters(
        shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
    assert_equal(X.shape, (100, 100), "X shape mismatch")
    assert_equal(rows.shape, (4, 100), "rows shape mismatch")
    assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
    assert_all_finite(X)
    assert_all_finite(rows)
    assert_all_finite(cols)
    # Same random_state must reproduce the same data.
    X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
                               shuffle=True, random_state=0)
    assert_array_almost_equal(X, X2)
def test_make_checkerboard():
    """Shapes, finiteness, and reproducibility of make_checkerboard."""
    X, rows, cols = make_checkerboard(
        shape=(100, 100), n_clusters=(20, 5),
        shuffle=True, random_state=0)
    assert_equal(X.shape, (100, 100), "X shape mismatch")
    assert_equal(rows.shape, (100, 100), "rows shape mismatch")
    assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
    X, rows, cols = make_checkerboard(
        shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
    assert_all_finite(X)
    assert_all_finite(rows)
    assert_all_finite(cols)
    # Same random_state must reproduce the same data.
    X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
                                 shuffle=True, random_state=0)
    X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
                                 shuffle=True, random_state=0)
    assert_array_equal(X1, X2)
| bsd-3-clause |
grundgruen/zipline | zipline/protocol.py | 1 | 17544 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from six import iteritems, iterkeys
import pandas as pd
import numpy as np
from .utils.enum import enum
from .utils.math_utils import nanstd, nanmean, nansum
from zipline.utils.algo_instance import get_algo_instance
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
# Datasource type should completely determine the other fields of a
# message with its type. Each name becomes an attribute of the enum
# object (see zipline.utils.enum).
DATASOURCE_TYPE = enum(
    'AS_TRADED_EQUITY',
    'MERGER',
    'SPLIT',
    'DIVIDEND',
    'TRADE',
    'TRANSACTION',
    'ORDER',
    'EMPTY',
    'DONE',
    'CUSTOM',
    'BENCHMARK',
    'COMMISSION',
    'CLOSE_POSITION'
)
# Index values expected in a historical-dividend Series.
DIVIDEND_FIELDS = [
    'declared_date',
    'ex_date',
    'gross_amount',
    'net_amount',
    'pay_date',
    'payment_sid',
    'ratio',
    'sid',
]
# Index values expected in a dividend-payment Series.
DIVIDEND_PAYMENT_FIELDS = [
    'id',
    'payment_sid',
    'cash_amount',
    'share_count',
]
def dividend_payment(data=None):
    """Build a Series describing the payment of one dividend.

    ``data`` is a mapping whose keys come from DIVIDEND_PAYMENT_FIELDS.
    When it is non-empty it must carry an 'id' (the unique integer that
    PerformanceTracker.update_dividends assigned to the historical
    dividend), and either a nonzero 'cash_amount' or a 'payment_sid'
    asset identifier together with a nonzero 'share_count'.

    The 'id' is also used as the Series name, so concatenating payments
    yields a DataFrame indexed by id. (When such a Series is returned by
    a function passed to `DataFrame.apply`, pandas keeps the index of the
    DataFrame being applied over and ignores the name.)
    """
    payment_id = None if data is None else data['id']
    return pd.Series(
        data=data,
        name=payment_id,
        index=DIVIDEND_PAYMENT_FIELDS,
        dtype=object,
    )
class Event(object):
    """Attribute bag with dict-style access used for simulation messages.

    Note: a truthy ``initial_values`` mapping becomes the instance
    ``__dict__`` itself -- it is shared with the caller, not copied.
    """
    def __init__(self, initial_values=None):
        if initial_values:
            self.__dict__ = initial_values
    def __repr__(self):
        return "Event({0})".format(self.__dict__)
    def __eq__(self, other):
        # Equal when the other object exposes an identical attribute dict.
        if not hasattr(other, '__dict__'):
            return False
        return self.__dict__ == other.__dict__
    def __contains__(self, key):
        return key in self.__dict__
    def __getitem__(self, key):
        return getattr(self, key)
    def __setitem__(self, key, value):
        setattr(self, key, value)
    def __delitem__(self, key):
        delattr(self, key)
    def keys(self):
        return self.__dict__.keys()
    def to_series(self, index=None):
        """Return the event's attributes as a pandas Series."""
        return pd.Series(self.__dict__, index=index)
class Order(Event):
    """An order message; same attribute/dict behavior as Event."""
    pass
class Portfolio(object):
    """Tracks cash, positions, and P&L for a running algorithm.

    Pickling support converts the Positions container to a plain dict in
    ``__getstate__`` and rebuilds it in ``__setstate__``; a version label
    is stored alongside the state so old snapshots can be rejected.
    """
    def __init__(self):
        self.capital_used = 0.0
        self.starting_cash = 0.0
        self.portfolio_value = 0.0
        self.pnl = 0.0
        self.returns = 0.0
        self.cash = 0.0
        self.positions = Positions()
        self.start_date = None
        self.positions_value = 0.0
    def __getitem__(self, key):
        return self.__dict__[key]
    def __repr__(self):
        return "Portfolio({0})".format(self.__dict__)
    def __getstate__(self):
        state = copy(self.__dict__)
        # Positions is a dict subclass; persist it as a primitive dict.
        state['positions'] = dict(self.positions)
        state[VERSION_LABEL] = 1
        return state
    def __setstate__(self, state):
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("Portfolio saved state is too old.")
        self.positions = Positions()
        self.positions.update(state.pop('positions'))
        self.__dict__.update(state)
class Account(object):
    '''
    The account object tracks information about the trading account. The
    values are updated as the algorithm runs and its keys remain unchanged.
    If connected to a broker, one can update these values with the trading
    account values as reported by the broker.
    '''
    def __init__(self):
        inf = float('inf')
        # Cash / margin figures.
        self.settled_cash = 0.0
        self.accrued_interest = 0.0
        self.buying_power = inf
        self.equity_with_loan = 0.0
        self.total_positions_value = 0.0
        self.regt_equity = 0.0
        self.regt_margin = inf
        self.initial_margin_requirement = 0.0
        self.maintenance_margin_requirement = 0.0
        self.available_funds = 0.0
        self.excess_liquidity = 0.0
        self.cushion = 0.0
        self.day_trades_remaining = inf
        # Leverage / liquidation metrics.
        self.leverage = 0.0
        self.net_leverage = 0.0
        self.net_liquidation = 0.0
    def __getitem__(self, key):
        return self.__dict__[key]
    def __repr__(self):
        return "Account({0})".format(self.__dict__)
    def __getstate__(self):
        # Serialize a shallow copy of our state, tagged with a version.
        state_dict = copy(self.__dict__)
        state_dict[VERSION_LABEL] = 1
        return state_dict
    def __setstate__(self, state):
        # Version 1 is the oldest state layout we can restore.
        if state.pop(VERSION_LABEL) < 1:
            raise BaseException("Account saved state is too old.")
        self.__dict__.update(state)
class Position(object):
    """A single-asset holding: share count, per-share cost basis and the
    most recent sale price."""
    def __init__(self, sid):
        self.sid = sid
        self.amount = 0
        self.cost_basis = 0.0  # per share
        self.last_sale_price = 0.0
    def __getitem__(self, key):
        return self.__dict__[key]
    def __repr__(self):
        return 'Position(' + repr(self.__dict__) + ')'
    def __getstate__(self):
        # Shallow-copy our state and tag it with a serialization version.
        state_dict = copy(self.__dict__)
        state_dict[VERSION_LABEL] = 1
        return state_dict
    def __setstate__(self, state):
        # Version 1 is the oldest state layout we can restore.
        if state.pop(VERSION_LABEL) < 1:
            raise BaseException("Protocol Position saved state is too old.")
        self.__dict__.update(state)
class Positions(dict):
    """A dict of sid -> Position that lazily creates empty positions."""
    def __missing__(self, key):
        # First access of an unknown sid materializes an empty Position and
        # caches it so repeated lookups return the same object.
        self[key] = pos = Position(key)
        return pos
class SIDData(object):
    """Per-sid view of the current bar's event data.

    Instances carry the current event's fields for one security (set via
    ``initial_values`` or item assignment) and expose rolling-window
    helpers (mavg, stddev, vwap, returns) computed through the algorithm's
    ``history`` API, with class-level caching keyed on the current dt.
    """
    # Cache some data on the class so that this is shared for all instances of
    # siddata.
    # The dt where we cached the history.
    _history_cache_dt = None
    # _history_cache is a dict mapping fields to pd.DataFrames. This is the
    # most data we have for a given field for the _history_cache_dt.
    _history_cache = {}
    # This is the cache that is used for returns. This will have a different
    # structure than the other history cache as this is always daily.
    _returns_cache_dt = None
    _returns_cache = None
    # The last dt that we needed to cache the number of minutes.
    _minute_bar_cache_dt = None
    # If we are in minute mode, there is some cost associated with computing
    # the number of minutes that we need to pass to the bar count of history.
    # This will remain constant for a given bar and day count.
    # This maps days to number of minutes.
    _minute_bar_cache = {}
    def __init__(self, sid, initial_values=None):
        self._sid = sid
        # history frequency string ('1d' or '1m'); set lazily by
        # _cache_daily_minutely once the data frequency is known.
        self._freqstr = None
        # To check if we have data, we use the __len__ which depends on the
        # __dict__. Because we are forward defining the attributes needed, we
        # need to account for their entries in the __dict__.
        # We will add 1 because we need to account for the _initial_len entry
        # itself.
        self._initial_len = len(self.__dict__) + 1
        if initial_values:
            self.__dict__.update(initial_values)
    @property
    def datetime(self):
        """
        Provides an alias from data['foo'].datetime -> data['foo'].dt
        `datetime` was previously provided by adding a seperate `datetime`
        member of the SIDData object via a generator that wrapped the incoming
        data feed and added the field to each equity event.
        This alias is intended to be temporary, to provide backwards
        compatibility with existing algorithms, but should be considered
        deprecated, and may be removed in the future.
        """
        return self.dt
    def get(self, name, default=None):
        # dict.get-style access over the event fields.
        return self.__dict__.get(name, default)
    def __getitem__(self, name):
        return self.__dict__[name]
    def __setitem__(self, name, value):
        self.__dict__[name] = value
    def __len__(self):
        # Number of event fields, excluding the attributes created in
        # __init__ (see _initial_len bookkeeping above).
        return len(self.__dict__) - self._initial_len
    def __contains__(self, name):
        return name in self.__dict__
    def __repr__(self):
        return "SIDData({0})".format(self.__dict__)
    def _get_buffer(self, bars, field='price', raw=False):
        """
        Gets the result of history for the given number of bars and field.
        This will cache the results internally.
        """
        cls = self.__class__
        algo = get_algo_instance()
        now = algo.datetime
        if now != cls._history_cache_dt:
            # For a given dt, the history call for this field will not change.
            # We have a new dt, so we should reset the cache.
            cls._history_cache_dt = now
            cls._history_cache = {}
        # NOTE: lookups/writes below go through self._history_cache, which
        # resolves to the same class-level dict as cls._history_cache since
        # instances never shadow it.
        if field not in self._history_cache \
                or bars > len(cls._history_cache[field][0].index):
            # If we have never cached this field OR the amount of bars that we
            # need for this field is greater than the amount we have cached,
            # then we need to get more history.
            hst = algo.history(
                bars, self._freqstr, field, ffill=True,
            )
            # Assert that the column holds ints, not security objects.
            if not isinstance(self._sid, str):
                hst.columns = hst.columns.astype(int)
            self._history_cache[field] = (hst, hst.values, hst.columns)
        # Slice of only the bars needed. This is because we store the LARGEST
        # amount of history for the field, and we might request less than the
        # largest from the cache.
        buffer_, values, columns = cls._history_cache[field]
        if raw:
            sid_index = columns.get_loc(self._sid)
            return values[-bars:, sid_index]
        else:
            return buffer_[self._sid][-bars:]
    def _cache_daily_minutely(self, days, fn):
        """
        Gets the number of bars needed for the current number of days.
        Figures this out based on the algo datafrequency and caches the result.
        This caches the result by replacing this function on the object.
        This means that after the first call to _get_bars, this method will
        point to a new function object.
        """
        def daily_get_max_bars(days):
            # In daily mode one bar == one day.
            return days
        def minute_get_max_bars(days):
            # max number of minute. regardless of current days or short
            # sessions
            return days * 390
        def daily_get_bars(days):
            return days
        def minute_get_bars(days):
            cls = self.__class__
            now = get_algo_instance().datetime
            if now != cls._minute_bar_cache_dt:
                cls._minute_bar_cache_dt = now
                cls._minute_bar_cache = {}
            if days not in cls._minute_bar_cache:
                # Cache this calculation to happen once per bar, even if we
                # use another transform with the same number of days.
                env = get_algo_instance().trading_environment
                prev = env.previous_trading_day(now)
                ds = env.days_in_range(
                    env.add_trading_days(-days + 2, prev),
                    prev,
                )
                # compute the number of minutes in the (days - 1) days before
                # today.
                # 210 minutes in an early close and 390 in a full day.
                ms = sum(210 if d in env.early_closes else 390 for d in ds)
                # Add the number of minutes for today.
                ms += int(
                    (now - env.get_open_and_close(now)[0]).total_seconds() / 60
                )
                cls._minute_bar_cache[days] = ms + 1  # Account for this minute
            return cls._minute_bar_cache[days]
        if get_algo_instance().sim_params.data_frequency == 'daily':
            self._freqstr = '1d'
            # update this method to point to the daily variant.
            self._get_bars = daily_get_bars
            self._get_max_bars = daily_get_max_bars
        else:
            self._freqstr = '1m'
            # update this method to point to the minute variant.
            self._get_bars = minute_get_bars
            self._get_max_bars = minute_get_max_bars
        # NOTE: This silently adds these two entries to the `__dict__`
        # without affecting the `__len__` of the object. This is important
        # because we use the `len` of the `SIDData` object to see if we have
        # data for this asset.
        self._initial_len += 2
        # Not actually recursive because we have already cached the new method.
        return getattr(self, fn)(days)
    def _get_bars(self, bars):
        # Replaced on first call by the daily/minute variant above.
        return self._cache_daily_minutely(bars, fn='_get_bars')
    def _get_max_bars(self, bars):
        # Replaced on first call by the daily/minute variant above.
        return self._cache_daily_minutely(bars, fn='_get_max_bars')
    def mavg(self, days):
        """Simple moving average of price over the trailing `days` days."""
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        return nanmean(prices)
    def stddev(self, days):
        """Sample standard deviation (ddof=1) of price over `days` days."""
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        return nanstd(prices, ddof=1)
    def vwap(self, days):
        """Volume-weighted average price over the trailing `days` days."""
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        vols = self._get_buffer(max_bars, field='volume', raw=True)[-bars:]
        vol_sum = nansum(vols)
        try:
            ret = nansum(prices * vols) / vol_sum
        except ZeroDivisionError:
            ret = np.nan
        return ret
    def returns(self):
        """Price return between yesterday's and today's daily close."""
        algo = get_algo_instance()
        now = algo.datetime
        # NOTE(review): this writes the cache onto the instance, shadowing
        # the class-level _returns_cache attributes — per-instance caching
        # appears intended here, unlike the shared history cache; confirm.
        if now != self._returns_cache_dt:
            self._returns_cache_dt = now
            self._returns_cache = algo.history(2, '1d', 'price', ffill=True)
        hst = self._returns_cache[self._sid]
        return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
class BarData(object):
    """Container for every sid's event data at a single dt.

    This is what is passed as ``data`` to ``handle_data``. The dict-like
    interface is kept for backwards compatibility with the dict subclass
    this replaced; an optional ``_contains_override`` predicate filters
    which sids are visible.
    """
    def __init__(self, data=None):
        self._data = data if data else {}
        self._contains_override = None
    def __contains__(self, name):
        override = self._contains_override
        if override and not override(name):
            return False
        return name in self._data
    def has_key(self, name):
        """
        DEPRECATED: __contains__ is preferred, but this method is for
        compatibility with existing algorithms.
        """
        return name in self
    def __setitem__(self, name, value):
        self._data[name] = value
    def __getitem__(self, name):
        return self._data[name]
    def __delitem__(self, name):
        del self._data[name]
    def __iter__(self):
        for sid, data in iteritems(self._data):
            # The contains override may filter out sids; sids with empty
            # event data are skipped as well.
            if sid in self and len(data):
                yield sid
    def iterkeys(self):
        # Route through __contains__ so the override applies.
        return (sid for sid in iterkeys(self._data) if sid in self)
    def keys(self):
        return list(self.iterkeys())
    def itervalues(self):
        return (value for _sid, value in self.iteritems())
    def values(self):
        return list(self.itervalues())
    def iteritems(self):
        return ((sid, value) for sid, value in iteritems(self._data)
                if sid in self)
    def items(self):
        return list(self.iteritems())
    def __len__(self):
        return len(self.keys())
    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, self._data)
| apache-2.0 |
davidgardenier/frbpoppy | frbpoppy/misc.py | 1 | 4119 | """Convenience functions."""
import inspect
import sys
import numpy as np
from scipy.integrate import quad
from scipy.stats import chi2, norm
def pprint(*s, output=True):
    """Print (or return) the given values prefixed with the caller's filename.

    Args:
        *s: Values to print.
        output (bool): If True, print to stdout and return None; if False,
            return the prefixed text as a single string instead.

    Returns:
        str or None: The prefixed lines when output=False, else None.
    """
    # Filename of the calling frame, used as a fixed-width prefix.
    caller_file = inspect.stack()[1][1].split('/')[-1]
    m = '{:13.13} |'.format(caller_file)
    if output:
        print(m, *s)
    else:
        lines = []
        for e in s:
            # Prefix every line of each (possibly multi-line) argument.
            # (Renamed the loop variable: the original reused `f`, shadowing
            # the caller-filename variable above.)
            lines.append('\n'.join(f'{m} {part}' for part in e.split('\n')))
        return '\n'.join(lines)
def progressbar(it, prefix="", size=69, file=sys.stdout):
    """Wrap an iterable with an in-place textual progress bar.

    Adapted from Stack Overflow.

    Args:
        it (sequence): Items to iterate over; must support ``len()``.
        prefix (str): Words displayed before the progress bar.
        size (int): Total display width, including the prefix.
        file: Writable stream to draw the bar on (default: stdout).

    Yields:
        The items of ``it``, unchanged.
    """
    count = len(it)
    size -= len(prefix)

    def show(j):
        # Guard against empty iterables (count == 0) instead of dividing
        # by zero; an empty run just shows a full-width bar.
        x = int(size * j / count) if count else size
        # '\r' rewinds to the line start so the bar redraws in place.
        # Previously the bar printed one new line per update and ignored
        # the `file` argument entirely (only flush() used it).
        print(f'{prefix} [{"#"*x}{"."*(size-x)}] {j}/{count}',
              end='\r', file=file, flush=True)

    show(0)
    for i, item in enumerate(it):
        yield item
        show(i + 1)
    # Finish with a newline so subsequent output starts on a fresh line.
    print(file=file)
def hist(parameter, bin_type='lin', n_bins=25, norm='max', edges=True,
         bins=None):
    """Bin up a parameter either in a lin or log space.

    Why is this not a standard option in numpy or matplotlib?

    Args:
        parameter (array): To be binned
        bin_type (str): Either 'lin', 'log' or 'ln'
        n_bins (int): Number of bins. Can be overriden internally
        norm (bool): Whether to normalise to 'max' or 'prob' or none
        edges (bool): Whether to pad the histogram with empty edge bins
        bins (array): Custom bin edges, overriding bin_type/n_bins

    Returns:
        tuple: bin centers, values per bin (or (nan, nan) if no finite data)
    """
    if isinstance(parameter, list):
        parameter = np.array(parameter)
    # Drop NaN/inf values BEFORE the empty check: previously an all-NaN
    # input slipped past the check and produced a divide-by-zero warning
    # and NaN-filled output instead of the documented (nan, nan).
    if len(parameter):
        parameter = parameter[~(np.isnan(parameter) | np.isinf(parameter))]
    if len(parameter) == 0:
        return np.nan, np.nan
    # Determine number of bins
    if n_bins != 25:
        pass  # caller chose explicitly; respect it
    elif len(parameter) < 50:
        n_bins = 15
    elif len(parameter) > 500:
        n_bins = 50
    # Determine type of binning
    if bin_type == 'lin':
        _bins = n_bins
    elif bin_type == 'log':
        # Zeros are excluded from the range so log10 stays finite.
        min_f = np.log10(np.min(parameter[parameter != 0]))
        max_f = np.log10(max(parameter))
        _bins = np.logspace(min_f, max_f, n_bins)
    elif bin_type == 'ln':
        min_f = np.log(np.min(parameter[parameter != 0]))
        max_f = np.log(max(parameter))
        _bins = np.logspace(min_f, max_f, n_bins, base=np.e)
    # Allow for custom bins
    if bins is not None:
        _bins = bins
    # Allow for probability weighting
    weights = None
    if norm == 'prob':
        weights = np.ones(len(parameter)) / len(parameter)
    # Bin
    n, bin_edges = np.histogram(parameter, bins=_bins, weights=weights)
    # Normalise, guarding against an all-zero histogram (0/0 -> NaN).
    if norm == 'max' and max(n) > 0:
        n = n/max(n)
    # Centre bins
    bins = (bin_edges[:-1] + bin_edges[1:]) / 2
    # Ensure there are edges on the outer bins of the histograms
    if edges:
        if bin_type == 'lin':
            bin_dif = np.diff(bins)[-1]
            bins = np.insert(bins, 0, bins[0] - bin_dif)
            bins = np.insert(bins, len(bins), bins[-1] + bin_dif)
            n = np.insert(n, 0, 0)
            n = np.insert(n, len(n), 0)
        else:
            # Log-spaced bins: pad by one bin width in log space.
            bin_dif = np.diff(np.log10(bins))[-1]
            bins = np.insert(bins, 0, 10**(np.log10(bins[0])-bin_dif))
            bins = np.insert(bins, len(bins), 10**(np.log10(bins[-1])+bin_dif))
            n = np.insert(n, 0, 0)
            n = np.insert(n, len(n), 0)
    return bins, n
def poisson_interval(k, sigma=1):
    """
    Use chi-squared info to get the poisson interval.
    Given a number of observed events, which range of observed events would
    have been just as likely given a particular interval?
    Based off https://stackoverflow.com/questions/14813530/
    poisson-confidence-interval-with-numpy
    """
    gauss = norm(0, 1).pdf
    # Two-sided tail probability corresponding to the requested sigma level.
    a = 1 - quad(gauss, -sigma, sigma, limit=1000)[0]
    low = chi2.ppf(a / 2, 2 * k) / 2
    high = chi2.ppf(1 - a / 2, 2 * k + 2) / 2
    # chi2.ppf is undefined at zero degrees of freedom, so pin the lower
    # bound for zero observed events.
    if k == 0:
        low = 0.0
    return low, high
| mit |
emon10005/sympy | sympy/physics/quantum/state.py | 58 | 29186 | """Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
    'KetBase',
    'BraBase',
    'StateBase',
    'State',
    'Ket',
    'Bra',
    'TimeDepState',
    'TimeDepBra',
    'TimeDepKet',
    'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# Module-private bracket glyphs used by the StateBase printers below.
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\N{MATHEMATICAL LEFT ANGLE BRACKET}")
_rbracket_ucode = u("\N{MATHEMATICAL RIGHT ANGLE BRACKET}")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\N{LIGHT VERTICAL BAR}")
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
    """Abstract base class for general abstract states in quantum mechanics.
    All other state classes defined will need to inherit from this class. It
    carries the basic structure for all other states such as dual, _eval_adjoint
    and label.
    This is an abstract base class and you should not instantiate it directly,
    instead use State.
    """
    @classmethod
    def _operators_to_state(self, ops, **options):
        """ Returns the eigenstate instance for the passed operators.
        This method should be overridden in subclasses. It will handle being
        passed either an Operator instance or set of Operator instances. It
        should return the corresponding state INSTANCE or simply raise a
        NotImplementedError. See cartesian.py for an example.
        """
        raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
    def _state_to_operators(self, op_classes, **options):
        """ Returns the operators which this state instance is an eigenstate
        of.
        This method should be overridden in subclasses. It will be called on
        state instances and be passed the operator classes that we wish to make
        into instances. The state instance will then transform the classes
        appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples,
        """
        raise NotImplementedError(
            "Cannot map this state to operators. Method not implemented!")
    @property
    def operators(self):
        """Return the operator(s) that this state is an eigenstate of"""
        from .operatorset import state_to_operators  # import internally to avoid circular import errors
        return state_to_operators(self)
    def _enumerate_state(self, num_states, **options):
        # Subclasses that support enumeration of basis states override this.
        raise NotImplementedError("Cannot enumerate this state!")
    def _represent_default_basis(self, **options):
        # Default representation: represent in the basis of this state's
        # own operators.
        return self._represent(basis=self.operators)
    #-------------------------------------------------------------------------
    # Dagger/dual
    #-------------------------------------------------------------------------
    @property
    def dual(self):
        """Return the dual state of this one."""
        return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
    @classmethod
    def dual_class(self):
        """Return the class used to construt the dual."""
        raise NotImplementedError(
            'dual_class must be implemented in a subclass'
        )
    def _eval_adjoint(self):
        """Compute the dagger of this state using the dual."""
        return self.dual
    #-------------------------------------------------------------------------
    # Printing
    #-------------------------------------------------------------------------
    def _pretty_brackets(self, height, use_unicode=True):
        # Return pretty printed brackets for the state
        # Ideally, this could be done by pform.parens but it does not support the angled < and >
        # Setup for unicode vs ascii
        if use_unicode:
            lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
            slash, bslash, vert = u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}'), \
                u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}'), \
                u('\N{BOX DRAWINGS LIGHT VERTICAL}')
        else:
            lbracket, rbracket = self.lbracket, self.rbracket
            slash, bslash, vert = '/', '\\', '|'
        # If height is 1, just return brackets
        if height == 1:
            return stringPict(lbracket), stringPict(rbracket)
        # Make height even
        height += (height % 2)
        brackets = []
        for bracket in lbracket, rbracket:
            # Create left bracket
            if bracket in set([_lbracket, _lbracket_ucode]):
                # '<' drawn as a column of '/' then '\' glyphs.
                bracket_args = [ ' ' * (height//2 - i - 1) +
                                slash for i in range(height // 2)]
                bracket_args.extend(
                    [ ' ' * i + bslash for i in range(height // 2)])
            # Create right bracket
            elif bracket in set([_rbracket, _rbracket_ucode]):
                # '>' drawn as a column of '\' then '/' glyphs.
                bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
                bracket_args.extend([ ' ' * (
                    height//2 - i - 1) + slash for i in range(height // 2)])
            # Create straight bracket
            elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
                bracket_args = [vert for i in range(height)]
            else:
                raise ValueError(bracket)
            brackets.append(
                stringPict('\n'.join(bracket_args), baseline=height//2))
        return brackets
    def _sympystr(self, printer, *args):
        # str form: contents wrapped in this state's ASCII brackets.
        contents = self._print_contents(printer, *args)
        return '%s%s%s' % (self.lbracket, contents, self.rbracket)
    def _pretty(self, printer, *args):
        from sympy.printing.pretty.stringpict import prettyForm
        # Get brackets
        pform = self._print_contents_pretty(printer, *args)
        lbracket, rbracket = self._pretty_brackets(
            pform.height(), printer._use_unicode)
        # Put together state
        pform = prettyForm(*pform.left(lbracket))
        pform = prettyForm(*pform.right(rbracket))
        return pform
    def _latex(self, printer, *args):
        contents = self._print_contents_latex(printer, *args)
        # The extra {} brackets are needed to get matplotlib's latex
        # rendered to render this properly.
        return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
    """Base class for Kets.

    Supplies the ket bracket glyphs used when printing and makes the dual a
    Bra. This is an abstract base class; instantiate Ket rather than this
    class directly.
    """
    lbracket = _straight_bracket
    rbracket = _rbracket
    lbracket_ucode = _straight_bracket_ucode
    rbracket_ucode = _rbracket_ucode
    lbracket_latex = r'\left|'
    rbracket_latex = r'\right\rangle '
    @classmethod
    def default_args(self):
        return ("psi",)
    @classmethod
    def dual_class(self):
        return BraBase
    def __mul__(self, other):
        """KetBase*other"""
        from sympy.physics.quantum.operator import OuterProduct
        if not isinstance(other, BraBase):
            return Expr.__mul__(self, other)
        # |ket><bra| forms an outer product.
        return OuterProduct(self, other)
    def __rmul__(self, other):
        """other*KetBase"""
        from sympy.physics.quantum.innerproduct import InnerProduct
        if not isinstance(other, BraBase):
            return Expr.__rmul__(self, other)
        # <bra|ket> forms an inner product.
        return InnerProduct(other, self)
    #-------------------------------------------------------------------------
    # _eval_* methods
    #-------------------------------------------------------------------------
    def _eval_innerproduct(self, bra, **hints):
        """Evaluate the inner product <bra|self>.

        Dispatches to ``_eval_innerproduct_<BraClass>(self, **hints)``
        methods, which subclasses define (one per BraClass) to teach the
        ket how to take inner products with bras.
        """
        return dispatch_method(self, '_eval_innerproduct', bra, **hints)
    def _apply_operator(self, op, **options):
        """Apply an Operator to this Ket.

        Dispatches to ``_apply_operator_<OperatorName>(op, **options)``
        methods, which subclasses define (one per OperatorName) to teach
        the Ket how operators act on it.

        Parameters
        ==========

        op : Operator
            The Operator that is acting on the Ket.
        options : dict
            Key/value pairs that control how the operator is applied.
        """
        return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
    """Base class for Bras.

    Supplies the bra bracket glyphs used when printing and makes the dual a
    Ket. This is an abstract base class; instantiate Bra rather than this
    class directly.
    """
    lbracket = _lbracket
    rbracket = _straight_bracket
    lbracket_ucode = _lbracket_ucode
    rbracket_ucode = _straight_bracket_ucode
    lbracket_latex = r'\left\langle '
    rbracket_latex = r'\right|'
    @classmethod
    def _operators_to_state(self, ops, **options):
        # Build the dual ket eigenstate from the operators, then return its
        # dual bra.
        ket = self.dual_class().operators_to_state(ops, **options)
        return ket.dual
    def _state_to_operators(self, op_classes, **options):
        # Delegate the operator mapping to the dual ket.
        return self.dual._state_to_operators(op_classes, **options)
    def _enumerate_state(self, num_states, **options):
        # Enumerate the dual kets, then map each back to its bra.
        return [ket.dual for ket in
                self.dual._enumerate_state(num_states, **options)]
    @classmethod
    def default_args(self):
        return self.dual_class().default_args()
    @classmethod
    def dual_class(self):
        return KetBase
    def __mul__(self, other):
        """BraBase*other"""
        from sympy.physics.quantum.innerproduct import InnerProduct
        if not isinstance(other, KetBase):
            return Expr.__mul__(self, other)
        return InnerProduct(self, other)
    def __rmul__(self, other):
        """other*BraBase"""
        from sympy.physics.quantum.operator import OuterProduct
        if not isinstance(other, KetBase):
            return Expr.__rmul__(self, other)
        return OuterProduct(other, self)
    def _represent(self, **options):
        """A default represent that takes the Dagger of the dual Ket's
        representation."""
        from sympy.physics.quantum.dagger import Dagger
        return Dagger(self.dual._represent(**options))
class State(StateBase):
    """General abstract quantum state used as a base class for Ket and Bra."""
    # Marker base class only; Ket and Bra mix it with KetBase/BraBase.
    pass
class Ket(State, KetBase):
    """A general time-independent Ket in quantum mechanics.
    Inherits from State and KetBase. This class should be used as the base
    class for all physical, time-independent Kets in a system. This class
    and its subclasses will be the main classes that users will use for
    expressing Kets in Dirac notation [1]_.
    Parameters
    ==========
    args : tuple
        The list of numbers or parameters that uniquely specify the
        ket. This will usually be its symbol or its quantum numbers. For
        time-dependent state, this will include the time.
    Examples
    ========
    Create a simple Ket and looking at its properties::
        >>> from sympy.physics.quantum import Ket, Bra
        >>> from sympy import symbols, I
        >>> k = Ket('psi')
        >>> k
        |psi>
        >>> k.hilbert_space
        H
        >>> k.is_commutative
        False
        >>> k.label
        (psi,)
    Ket's know about their associated bra::
        >>> k.dual
        <psi|
        >>> k.dual_class()
        <class 'sympy.physics.quantum.state.Bra'>
    Take a linear combination of two kets::
        >>> k0 = Ket(0)
        >>> k1 = Ket(1)
        >>> 2*I*k0 - 4*k1
        2*I*|0> - 4*|1>
    Compound labels are passed as tuples::
        >>> n, m = symbols('n,m')
        >>> k = Ket(n,m)
        >>> k
        |nm>
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
    """
    @classmethod
    def dual_class(self):
        # The dual of a Ket is the corresponding Bra.
        return Bra
class Bra(State, BraBase):
    """A general time-independent Bra in quantum mechanics.
    Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
    class and its subclasses will be the main classes that users will use for
    expressing Bras in Dirac notation.
    Parameters
    ==========
    args : tuple
        The list of numbers or parameters that uniquely specify the
        ket. This will usually be its symbol or its quantum numbers. For
        time-dependent state, this will include the time.
    Examples
    ========
    Create a simple Bra and look at its properties::
        >>> from sympy.physics.quantum import Ket, Bra
        >>> from sympy import symbols, I
        >>> b = Bra('psi')
        >>> b
        <psi|
        >>> b.hilbert_space
        H
        >>> b.is_commutative
        False
    Bra's know about their dual Ket's::
        >>> b.dual
        |psi>
        >>> b.dual_class()
        <class 'sympy.physics.quantum.state.Ket'>
    Like Kets, Bras can have compound labels and be manipulated in a similar
    manner::
        >>> n, m = symbols('n,m')
        >>> b = Bra(n,m) - I*Bra(m,n)
        >>> b
        -I*<mn| + <nm|
    Symbols in a Bra can be substituted using ``.subs``::
        >>> b.subs(n,m)
        <mm| - I*<mm|
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
    """
    @classmethod
    def dual_class(self):
        # The dual of a Bra is the corresponding Ket.
        return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
    """Base class for a general time-dependent quantum state.

    Like a time-independent state, but the final element of ``args`` is
    interpreted as the time; everything before it forms the label.

    Parameters
    ==========

    args : tuple
        The numbers or parameters that uniquely specify the state,
        followed by the time as the final argument.
    """
    #-------------------------------------------------------------------------
    # Initialization
    #-------------------------------------------------------------------------
    @classmethod
    def default_args(self):
        return ("psi", "t")
    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------
    @property
    def label(self):
        """The label of the state (all args except the trailing time)."""
        return self.args[:-1]
    @property
    def time(self):
        """The time of the state (the final arg)."""
        return self.args[-1]
    #-------------------------------------------------------------------------
    # Printing
    #-------------------------------------------------------------------------
    def _print_time(self, printer, *args):
        return printer._print(self.time, *args)
    # repr and latex reuse the plain time printer.
    _print_time_repr = _print_time
    _print_time_latex = _print_time
    def _print_time_pretty(self, printer, *args):
        return printer._print(self.time, *args)
    def _print_contents(self, printer, *args):
        # str form is "label;time".
        return '%s;%s' % (self._print_label(printer, *args),
                          self._print_time(printer, *args))
    def _print_label_repr(self, printer, *args):
        return '%s,%s' % (self._print_sequence(self.label, ',', printer, *args),
                          self._print_time_repr(printer, *args))
    def _print_contents_pretty(self, printer, *args):
        pair = (self._print_label_pretty(printer, *args),
                self._print_time_pretty(printer, *args))
        return printer._print_seq(pair, delimiter=';')
    def _print_contents_latex(self, printer, *args):
        label = self._print_sequence(
            self.label, self._label_separator, printer, *args)
        return '%s;%s' % (label, self._print_time_latex(printer, *args))
class TimeDepKet(TimeDepState, KetBase):
    """General time-dependent Ket in quantum mechanics.
    This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
    that should be used for Kets that vary with time. Its dual is a
    ``TimeDepBra``.
    Parameters
    ==========
    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.
    Examples
    ========
    Create a TimeDepKet and look at its attributes::
        >>> from sympy.physics.quantum import TimeDepKet
        >>> k = TimeDepKet('psi', 't')
        >>> k
        |psi;t>
        >>> k.time
        t
        >>> k.label
        (psi,)
        >>> k.hilbert_space
        H
    TimeDepKets know about their dual bra::
        >>> k.dual
        <psi;t|
        >>> k.dual_class()
        <class 'sympy.physics.quantum.state.TimeDepBra'>
    """
    @classmethod
    def dual_class(self):
        # The dual of a time-dependent ket is a time-dependent bra.
        return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
    """General time-dependent Bra in quantum mechanics.
    This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepBra.
    Parameters
    ==========
    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.
    Examples
    ========
    >>> from sympy.physics.quantum import TimeDepBra
    >>> from sympy import symbols, I
    >>> b = TimeDepBra('psi', 't')
    >>> b
    <psi;t|
    >>> b.time
    t
    >>> b.label
    (psi,)
    >>> b.hilbert_space
    H
    >>> b.dual
    |psi;t>
    """
    @classmethod
    def dual_class(self):
        # The dual of a time-dependent bra is a time-dependent ket.
        return TimeDepKet
class Wavefunction(Function):
    """Class for representations in continuous bases

    This class takes an expression and coordinates in its constructor. It can
    be used to easily calculate normalizations and probabilities.

    Parameters
    ==========

    expr : Expr
        The expression representing the functional form of the w.f.
    coords : Symbol or tuple
        The coordinates to be integrated over, and their bounds

    Examples
    ========

    Particle in a box, specifying bounds in the more primitive way of using
    Piecewise:

    >>> from sympy import Symbol, Piecewise, pi, N
    >>> from sympy.functions import sqrt, sin
    >>> from sympy.physics.quantum.state import Wavefunction
    >>> x = Symbol('x', real=True)
    >>> n = 1
    >>> L = 1
    >>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
    >>> f = Wavefunction(g, x)
    >>> f.norm
    1
    >>> f.is_normalized
    True
    >>> p = f.prob()
    >>> p(0)
    0
    >>> p(L)
    0
    >>> p(0.5)
    2
    >>> p(0.85*L)
    2*sin(0.85*pi)**2
    >>> N(p(0.85*L))
    0.412214747707527

    Additionally, you can specify the bounds of the function and the indices in
    a more compact way:

    >>> from sympy import symbols, pi, diff
    >>> from sympy.functions import sqrt, sin
    >>> from sympy.physics.quantum.state import Wavefunction
    >>> x, L = symbols('x,L', positive=True)
    >>> n = symbols('n', integer=True, positive=True)
    >>> g = sqrt(2/L)*sin(n*pi*x/L)
    >>> f = Wavefunction(g, (x, 0, L))
    >>> f.norm
    1
    >>> f(L+1)
    0
    >>> f(L-1)
    sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
    >>> f(-1)
    0
    >>> f(0.85)
    sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
    >>> f(0.85, n=1, L=1)
    sqrt(2)*sin(0.85*pi)
    >>> f.is_commutative
    False

    All arguments are automatically sympified, so you can define the variables
    as strings rather than symbols:

    >>> expr = x**2
    >>> f = Wavefunction(expr, 'x')
    >>> type(f.variables[0])
    <class 'sympy.core.symbol.Symbol'>

    Derivatives of Wavefunctions will return Wavefunctions:

    >>> diff(f, x)
    Wavefunction(2*x, x)
    """

    # Any passed tuples for coordinates and their bounds need to be
    # converted to Tuples before Function's constructor is called, to
    # avoid errors from calling is_Float in the constructor
    def __new__(cls, *args, **options):
        new_args = [None for i in args]
        ct = 0
        for arg in args:
            if isinstance(arg, tuple):
                # (coord, lower, upper) -> sympy Tuple so sympification is safe
                new_args[ct] = Tuple(*arg)
            else:
                new_args[ct] = arg
            ct += 1
        # Skip Function.__new__ (which would try to auto-evaluate) and call
        # the grandparent constructor directly.
        return super(Function, cls).__new__(cls, *new_args, **options)

    def __call__(self, *args, **options):
        # Evaluate the wavefunction at concrete coordinate values.
        # Positional ``args`` map one-to-one onto ``self.variables``;
        # keyword ``options`` substitute any other free symbols (e.g.
        # quantum numbers).  Returns 0 outside the declared bounds.
        var = self.variables
        if len(args) != len(var):
            raise NotImplementedError(
                "Incorrect number of arguments to function!")
        ct = 0
        # If the passed value is outside the specified bounds, return 0
        for v in var:
            lower, upper = self.limits[v]
            # Do the comparison to limits only if the passed symbol is actually
            # a symbol present in the limits;
            # Had problems with a comparison of x > L
            if isinstance(args[ct], Expr) and \
                    not (lower in args[ct].free_symbols
                         or upper in args[ct].free_symbols):
                continue
            # ``== True`` keeps symbolic (undecidable) comparisons from
            # being treated as out-of-bounds.
            if (args[ct] < lower) == True or (args[ct] > upper) == True:
                return 0
            ct += 1
        expr = self.expr
        # Allows user to make a call like f(2, 4, m=1, n=1)
        for symbol in list(expr.free_symbols):
            if str(symbol) in options.keys():
                val = options[str(symbol)]
                expr = expr.subs(symbol, val)
        return expr.subs(zip(var, args))

    def _eval_derivative(self, symbol):
        # Differentiate the underlying expression and re-wrap, keeping the
        # original coordinates/bounds (self.args[1:]).
        expr = self.expr
        deriv = expr._eval_derivative(symbol)
        return Wavefunction(deriv, *self.args[1:])

    def _eval_conjugate(self):
        # Complex-conjugate the functional form; coordinates are unchanged.
        return Wavefunction(conjugate(self.expr), *self.args[1:])

    def _eval_transpose(self):
        # A scalar function is its own transpose.
        return self

    @property
    def free_symbols(self):
        return self.expr.free_symbols

    @property
    def is_commutative(self):
        """
        Override Function's is_commutative so that order is preserved in
        represented expressions
        """
        return False

    @classmethod
    def eval(self, *args):
        # Never auto-evaluate; a Wavefunction stays an unevaluated object.
        return None

    @property
    def variables(self):
        """
        Return the coordinates which the wavefunction depends on

        Examples
        ========

        >>> from sympy.physics.quantum.state import Wavefunction
        >>> from sympy import symbols
        >>> x,y = symbols('x,y')
        >>> f = Wavefunction(x*y, x, y)
        >>> f.variables
        (x, y)
        >>> g = Wavefunction(x*y, x)
        >>> g.variables
        (x,)
        """
        # args[0] is the expression; each later arg is either a bare symbol
        # or a Tuple (symbol, lower, upper).
        var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
        return tuple(var)

    @property
    def limits(self):
        """
        Return the limits of the coordinates which the w.f. depends on. If no
        limits are specified, defaults to ``(-oo, oo)``.

        Examples
        ========

        >>> from sympy.physics.quantum.state import Wavefunction
        >>> from sympy import symbols
        >>> x, y = symbols('x, y')
        >>> f = Wavefunction(x**2, (x, 0, 1))
        >>> f.limits
        {x: (0, 1)}
        >>> f = Wavefunction(x**2, x)
        >>> f.limits
        {x: (-oo, oo)}
        >>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
        >>> f.limits
        {x: (-oo, oo), y: (-1, 2)}
        """
        limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
                  for g in self._args[1:]]
        return dict(zip(self.variables, tuple(limits)))

    @property
    def expr(self):
        """
        Return the expression which is the functional form of the Wavefunction

        Examples
        ========

        >>> from sympy.physics.quantum.state import Wavefunction
        >>> from sympy import symbols
        >>> x, y = symbols('x, y')
        >>> f = Wavefunction(x**2, x)
        >>> f.expr
        x**2
        """
        return self._args[0]

    @property
    def is_normalized(self):
        """
        Returns true if the Wavefunction is properly normalized

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x, L = symbols('x,L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sqrt(2/L)*sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.is_normalized
        True
        """
        return (self.norm == 1.0)

    @property
    @cacheit  # norm is cached: the integration can be expensive
    def norm(self):
        """
        Return the normalization of the specified functional form.

        This function integrates over the coordinates of the Wavefunction, with
        the bounds specified.

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x, L = symbols('x,L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sqrt(2/L)*sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.norm
        1
        >>> g = sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.norm
        sqrt(2)*sqrt(L)/2
        """
        # Integrate |psi|^2 over each coordinate in turn, then take the
        # square root.  (``exp`` here is a local accumulator, not e**x.)
        exp = self.expr*conjugate(self.expr)
        var = self.variables
        limits = self.limits
        for v in var:
            curr_limits = limits[v]
            exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
        return sqrt(exp)

    def normalize(self):
        """
        Return a normalized version of the Wavefunction

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x = symbols('x', real=True)
        >>> L = symbols('L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.normalize()
        Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
        """
        const = self.norm
        if const == oo:
            raise NotImplementedError("The function is not normalizable!")
        else:
            # Divide by the norm, keeping the original coordinates/bounds.
            return Wavefunction((const)**(-1)*self.expr, *self.args[1:])

    def prob(self):
        r"""
        Return the absolute magnitude of the w.f., `|\psi(x)|^2`

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x, L = symbols('x,L', real=True)
        >>> n = symbols('n', integer=True)
        >>> g = sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.prob()
        Wavefunction(sin(pi*n*x/L)**2, x)
        """
        # NOTE: only the bare variables are kept here, so the probability
        # density drops the bounds of the original wavefunction.
        return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
| bsd-3-clause |
SaikWolf/gnuradio | gr-filter/examples/synth_to_chan.py | 18 | 3875 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
    # Flowgraph: five sine sources -> NBFM transmitters -> PFB synthesizer
    # (combines the channels into one wideband signal) -> additive noise ->
    # PFB channelizer (splits it back apart) -> per-channel sinks for
    # plotting.  Python 2 / GNU Radio 3.x code.
    N = 1000000  # number of complex samples to run through the graph
    fs = 8000    # per-channel sample rate (Hz)
    freqs = [100, 200, 300, 400, 500]  # audio tone for each transmitter
    nchans = 7
    sigs = list()
    fmtx = list()
    for fi in freqs:
        s = analog.sig_source_f(fs, analog.GR_SIN_WAVE, fi, 1)
        fm = analog.nbfm_tx(fs, 4*fs, max_dev=10000, tau=75e-6, fh=0.925*(4*fs)/2.0)
        sigs.append(s)
        fmtx.append(fm)

    # Prototype low-pass filters for the synthesis and analysis filterbanks.
    syntaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
    print "Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
                                                               len(syntaps)/nchans)
    chtaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
    print "Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
                                                                 len(chtaps)/nchans)
    filtbank = filter.pfb_synthesizer_ccf(nchans, syntaps)
    channelizer = filter.pfb.channelizer_ccf(nchans, chtaps)

    noise_level = 0.01
    head = blocks.head(gr.sizeof_gr_complex, N)
    noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_level)
    addnoise = blocks.add_cc()
    snk_synth = blocks.vector_sink_c()

    tb = gr.top_block()

    # Wideband path: synthesizer output (limited to N samples) plus noise.
    tb.connect(noise, (addnoise,0))
    tb.connect(filtbank, head, (addnoise, 1))
    tb.connect(addnoise, channelizer)
    tb.connect(addnoise, snk_synth)

    snk = list()
    # One modulated tone feeds each synthesizer input port.
    for i,si in enumerate(sigs):
        tb.connect(si, fmtx[i], (filtbank, i))

    # One vector sink per channelizer output port.
    for i in xrange(nchans):
        snk.append(blocks.vector_sink_c())
        tb.connect((channelizer, i), snk[i])

    tb.run()

    if 1:
        channel = 1
        # Skip the filter transient at the start of the capture.
        data = snk[channel].data()[1000:]

        f1 = pylab.figure(1)
        s1 = f1.add_subplot(1,1,1)
        s1.plot(data[10000:10200] )
        s1.set_title(("Output Signal from Channel %d" % channel))

        fftlen = 2048
        winfunc = scipy.blackman
        #winfunc = scipy.hamming

        f2 = pylab.figure(2)
        s2 = f2.add_subplot(1,1,1)
        s2.psd(data, NFFT=fftlen,
               Fs = nchans*fs,
               noverlap=fftlen/4,
               window = lambda d: d*winfunc(fftlen))
        s2.set_title(("Output PSD from Channel %d" % channel))

        f3 = pylab.figure(3)
        s3 = f3.add_subplot(1,1,1)
        s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
               Fs = nchans*fs,
               noverlap=fftlen/4,
               window = lambda d: d*winfunc(fftlen))
        s3.set_title("Output of Synthesis Filter")

        pylab.show()
# Build, run, and plot the flowgraph when executed as a script.
if __name__ == "__main__":
    main()
| gpl-3.0 |
gfyoung/pandas | pandas/tests/strings/test_string_array.py | 1 | 3130 | import numpy as np
import pytest
from pandas._libs import lib
import pandas as pd
from pandas import DataFrame, Series, _testing as tm
def test_string_array(any_string_method):
    """Every ``.str`` accessor method returns the same values on an
    object-dtype and a StringDtype Series, with the expected dtype upgrades
    on the StringDtype path (object->string, bool->boolean, float->Int64).
    """
    method_name, args, kwargs = any_string_method
    if method_name == "decode":
        pytest.skip("decode requires bytes.")

    data = ["a", "bb", np.nan, "ccc"]
    a = Series(data, dtype=object)
    b = Series(data, dtype="string")

    expected = getattr(a.str, method_name)(*args, **kwargs)
    result = getattr(b.str, method_name)(*args, **kwargs)

    if isinstance(expected, Series):
        if expected.dtype == "object" and lib.is_string_array(
            expected.dropna().values,
        ):
            # all-string object result -> StringDtype on the EA path
            assert result.dtype == "string"
            result = result.astype(object)

        elif expected.dtype == "object" and lib.is_bool_array(
            expected.values, skipna=True
        ):
            # boolean-with-NA object result -> masked "boolean" dtype
            assert result.dtype == "boolean"
            result = result.astype(object)

        elif expected.dtype == "bool":
            assert result.dtype == "boolean"
            result = result.astype("bool")

        elif expected.dtype == "float" and expected.isna().any():
            # float only because of NaN -> nullable Int64 on the EA path
            assert result.dtype == "Int64"
            result = result.astype("float")

    elif isinstance(expected, DataFrame):
        columns = expected.select_dtypes(include="object").columns
        assert all(result[columns].dtypes == "string")
        result[columns] = result[columns].astype(object)
    # After casting back, the values must match the object-dtype baseline.
    tm.assert_equal(result, expected)
@pytest.mark.parametrize(
    "method,expected",
    [
        ("count", [2, None]),
        ("find", [0, None]),
        ("index", [0, None]),
        ("rindex", [2, None]),
    ],
)
def test_string_array_numeric_integer_array(method, expected):
    # Integer-returning str accessor methods on StringDtype must produce
    # the nullable Int64 dtype so missing values survive as pd.NA.
    ser = Series(["aba", None], dtype="string")
    outcome = getattr(ser.str, method)("a")
    tm.assert_series_equal(outcome, Series(expected, dtype="Int64"))
@pytest.mark.parametrize(
    "method,expected",
    [
        ("isdigit", [False, None, True]),
        ("isalpha", [True, None, False]),
        ("isalnum", [True, None, True]),
        ("isnumeric", [False, None, True]),
    ],
)
def test_string_array_boolean_array(method, expected):
    """Boolean-returning str accessor methods on StringDtype produce the
    masked "boolean" dtype, with None propagated as pd.NA.

    Fix: "isdigit" was parametrized twice; the duplicate entry is replaced
    with "isnumeric", which has the same expected values for these inputs.
    """
    s = Series(["a", None, "1"], dtype="string")
    result = getattr(s.str, method)()
    expected = Series(expected, dtype="boolean")
    tm.assert_series_equal(result, expected)
def test_string_array_extract():
    # Regression test for GH#30969: ``str.extract`` with ``expand=False``
    # and more than one capture group used to fail on StringDtype.
    values = ["a1", "b2", "cc"]
    pattern = r"(\w)(\d)"
    string_ser = Series(values, dtype="string")
    object_ser = Series(values, dtype="object")
    extracted = string_ser.str.extract(pattern, expand=False)
    expected = object_ser.str.extract(pattern, expand=False)
    # Every extracted column keeps the string dtype...
    assert all(extracted.dtypes == "string")
    # ...and the values agree with the object-dtype baseline.
    tm.assert_equal(extracted.astype(object), expected)
def test_str_get_stringarray_multiple_nans():
    # ``Series.str.get`` must return pd.NA for strings shorter than the
    # index (and for missing values), even with multiple NAs present.
    ser = Series(pd.array(["a", "ab", pd.NA, "abc"]))
    out = ser.str.get(2)
    expected = Series(pd.array([pd.NA, pd.NA, pd.NA, "c"]))
    tm.assert_series_equal(out, expected)
| bsd-3-clause |
djgagne/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 155 | 8058 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
    """fit_transform and fit().transform() agree (up to component sign) for
    several kernels and eigen solvers; inverse_transform round-trips shapes.
    """
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))

    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    for eigen_solver in ("auto", "dense", "arpack"):
        for kernel in ("linear", "rbf", "poly", histogram):
            # histogram kernel produces singular matrix inside linalg.solve
            # XXX use a least-squares approximation?
            inv = not callable(kernel)

            # transform fit data
            kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
                             fit_inverse_transform=inv)
            X_fit_transformed = kpca.fit_transform(X_fit)
            X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
            # sign of a component is arbitrary -> compare absolute values
            assert_array_almost_equal(np.abs(X_fit_transformed),
                                      np.abs(X_fit_transformed2))

            # non-regression test: previously, gamma would be 0 by default,
            # forcing all eigenvalues to 0 under the poly kernel
            assert_not_equal(X_fit_transformed, [])

            # transform new data
            X_pred_transformed = kpca.transform(X_pred)
            assert_equal(X_pred_transformed.shape[1],
                         X_fit_transformed.shape[1])

            # inverse transform (only fitted for non-callable kernels)
            if inv:
                X_pred2 = kpca.inverse_transform(X_pred_transformed)
                assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
    # fit_inverse_transform cannot be combined with a precomputed kernel.
    assert_raises(ValueError, KernelPCA, 10, kernel='precomputed',
                  fit_inverse_transform=True)
def test_kernel_pca_sparse():
    """KernelPCA accepts scipy sparse matrices for fit and transform."""
    rng = np.random.RandomState(0)
    X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
    X_pred = sp.csr_matrix(rng.random_sample((2, 4)))

    for eigen_solver in ("auto", "arpack"):
        for kernel in ("linear", "rbf", "poly"):
            # transform fit data
            kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
                             fit_inverse_transform=False)
            X_fit_transformed = kpca.fit_transform(X_fit)
            X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
            # component sign is arbitrary -> compare absolute values
            assert_array_almost_equal(np.abs(X_fit_transformed),
                                      np.abs(X_fit_transformed2))

            # transform new data
            X_pred_transformed = kpca.transform(X_pred)
            assert_equal(X_pred_transformed.shape[1],
                         X_fit_transformed.shape[1])

            # inverse transform is not exercised for sparse input
            # X_pred2 = kpca.inverse_transform(X_pred_transformed)
            # assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
    # With a linear kernel, kernel PCA must match ordinary PCA up to the
    # arbitrary sign of each component.
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))

    # Only the first four components are fitted: the fifth has a near-zero
    # eigenvalue, so it can be trimmed due to roundoff error.
    proj_kpca = KernelPCA(4).fit(X_fit).transform(X_pred)
    proj_pca = PCA(4).fit(X_fit).transform(X_pred)
    assert_array_almost_equal(np.abs(proj_kpca), np.abs(proj_pca))
def test_kernel_pca_n_components():
    # The transformed output must have exactly n_components columns for
    # both dense and arpack eigen solvers.
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))

    for solver in ("dense", "arpack"):
        for n_comp in [1, 2, 4]:
            kpca = KernelPCA(n_components=n_comp, eigen_solver=solver)
            transformed = kpca.fit(X_fit).transform(X_pred)
            assert_equal(transformed.shape, (2, n_comp))
def test_remove_zero_eig():
    # Data whose kernel matrix has (numerically) zero eigenvalues only.
    X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])

    # n_components=None (default) implies remove_zero_eig=True, so every
    # near-zero component is dropped.
    assert_equal(KernelPCA().fit_transform(X).shape, (3, 0))

    # An explicit n_components keeps the requested number of components...
    assert_equal(KernelPCA(n_components=2).fit_transform(X).shape, (3, 2))

    # ...unless remove_zero_eig is also requested explicitly.
    kpca = KernelPCA(n_components=2, remove_zero_eig=True)
    assert_equal(kpca.fit_transform(X).shape, (3, 0))
def test_kernel_pca_precomputed():
    """kernel='precomputed' fed with the Gram matrix X @ X.T reproduces the
    default (linear-kernel) projections, up to component sign.
    """
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))

    for eigen_solver in ("dense", "arpack"):
        # default kernel on raw data
        X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
            fit(X_fit).transform(X_pred)
        # precomputed kernel: fit on K(train, train), transform K(test, train)
        X_kpca2 = KernelPCA(
            4, eigen_solver=eigen_solver, kernel='precomputed').fit(
                np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))

        # fit_transform vs fit().transform() on the same Gram matrix
        X_kpca_train = KernelPCA(
            4, eigen_solver=eigen_solver,
            kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
        X_kpca_train2 = KernelPCA(
            4, eigen_solver=eigen_solver, kernel='precomputed').fit(
                np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))

        # component sign is arbitrary -> compare absolute values
        assert_array_almost_equal(np.abs(X_kpca),
                                  np.abs(X_kpca2))

        assert_array_almost_equal(np.abs(X_kpca_train),
                                  np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
    # An unknown kernel name must raise ValueError at fit time.
    X_fit = np.random.RandomState(0).random_sample((2, 4))
    assert_raises(ValueError, KernelPCA(kernel="tototiti").fit, X_fit)
def test_gridsearch_pipeline():
    # A grid-search over the KPCA gamma, followed by a Perceptron, must
    # find a parameterization that separates the two concentric circles.
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)
    pipeline = Pipeline([
        ("kernel_pca", KernelPCA(kernel="rbf", n_components=2)),
        ("Perceptron", Perceptron()),
    ])
    grid_search = GridSearchCV(
        pipeline, cv=3,
        param_grid=dict(kernel_pca__gamma=2. ** np.arange(-2, 2)))
    grid_search.fit(X, y)
    assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
    # Grid-search over Perceptron epochs on top of a fixed precomputed RBF
    # kernel must perfectly separate the two circles.
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)
    pipeline = Pipeline([
        ("kernel_pca", KernelPCA(kernel="precomputed", n_components=2)),
        ("Perceptron", Perceptron()),
    ])
    grid_search = GridSearchCV(
        pipeline, cv=3,
        param_grid=dict(Perceptron__n_iter=np.arange(1, 5)))
    grid_search.fit(rbf_kernel(X, gamma=2.), y)
    assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
    """The first 2 components of an RBF Kernel PCA linearly separate the
    nested-circles dataset, while the raw 2D data cannot be separated.
    """
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)

    # 2D nested circles are not linearly separable
    train_score = Perceptron().fit(X, y).score(X, y)
    assert_less(train_score, 0.8)

    # Project the circles data into the first 2 components of a RBF Kernel
    # PCA model.
    # Note that the gamma value is data dependent. If this test breaks
    # and the gamma value has to be updated, the Kernel PCA example will
    # have to be updated too.
    kpca = KernelPCA(kernel="rbf", n_components=2,
                     fit_inverse_transform=True, gamma=2.)
    X_kpca = kpca.fit_transform(X)

    # The data is perfectly linearly separable in that space
    train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
    assert_equal(train_score, 1.0)
| bsd-3-clause |
amolkahat/pandas | pandas/tests/extension/base/reduce.py | 2 | 1908 | import warnings
import pytest
import pandas.util.testing as tm
import pandas as pd
from .base import BaseExtensionTests
class BaseReduceTests(BaseExtensionTests):
    """
    Reduction-specific tests.

    These generally only make sense for numeric/boolean operations.
    """

    def check_reduce(self, s, op_name, skipna):
        # Compare the extension-array reduction against the same reduction
        # computed on a plain float64 cast of the data.
        reduce_ea = getattr(s, op_name)
        reduce_float = getattr(s.astype('float64'), op_name)
        tm.assert_almost_equal(reduce_ea(skipna=skipna),
                               reduce_float(skipna=skipna))
class BaseNoReduceTests(BaseReduceTests):
    """For extension dtypes that define no reductions at all."""

    @pytest.mark.parametrize('skipna', [True, False])
    def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
        # Every numeric reduction must raise TypeError on this dtype.
        ser = pd.Series(data)
        with pytest.raises(TypeError):
            getattr(ser, all_numeric_reductions)(skipna=skipna)

    @pytest.mark.parametrize('skipna', [True, False])
    def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
        # Every boolean reduction must raise TypeError on this dtype.
        ser = pd.Series(data)
        with pytest.raises(TypeError):
            getattr(ser, all_boolean_reductions)(skipna=skipna)
class BaseNumericReduceTests(BaseReduceTests):
    """Shared tests for dtypes that support the numeric reductions."""

    @pytest.mark.parametrize('skipna', [True, False])
    def test_reduce_series(self, data, all_numeric_reductions, skipna):
        ser = pd.Series(data)
        # min/max with empty data emit numpy RuntimeWarnings; silence them.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            self.check_reduce(ser, all_numeric_reductions, skipna)
class BaseBooleanReduceTests(BaseReduceTests):
    """Shared tests for dtypes that support the boolean reductions."""

    @pytest.mark.parametrize('skipna', [True, False])
    def test_reduce_series(self, data, all_boolean_reductions, skipna):
        self.check_reduce(pd.Series(data), all_boolean_reductions, skipna)
| bsd-3-clause |
PG-TUe/tpot | tpot/config/classifier.py | 1 | 6159 | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Weixuan Fu (weixuanf@upenn.edu)
- Daniel Angell (dpa34@drexel.edu)
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
# Check the TPOT documentation for information on the structure of config dicts
# Keys are import paths of estimators/transformers; values map hyperparameter
# names to the candidate values TPOT may choose during its search.
classifier_config_dict = {

    # Classifiers
    'sklearn.naive_bayes.GaussianNB': {
    },

    'sklearn.naive_bayes.BernoulliNB': {
        'alpha': [1e-3, 1e-2, 1e-1, 1., 10., 100.],
        'fit_prior': [True, False]
    },

    'sklearn.naive_bayes.MultinomialNB': {
        'alpha': [1e-3, 1e-2, 1e-1, 1., 10., 100.],
        'fit_prior': [True, False]
    },

    'sklearn.tree.DecisionTreeClassifier': {
        'criterion': ["gini", "entropy"],
        'max_depth': range(1, 11),
        'min_samples_split': range(2, 21),
        'min_samples_leaf': range(1, 21)
    },

    'sklearn.ensemble.ExtraTreesClassifier': {
        'n_estimators': [100],
        'criterion': ["gini", "entropy"],
        'max_features': np.arange(0.05, 1.01, 0.05),
        'min_samples_split': range(2, 21),
        'min_samples_leaf': range(1, 21),
        'bootstrap': [True, False]
    },

    'sklearn.ensemble.RandomForestClassifier': {
        'n_estimators': [100],
        'criterion': ["gini", "entropy"],
        'max_features': np.arange(0.05, 1.01, 0.05),
        'min_samples_split': range(2, 21),
        'min_samples_leaf': range(1, 21),
        'bootstrap': [True, False]
    },

    'sklearn.ensemble.GradientBoostingClassifier': {
        'n_estimators': [100],
        'learning_rate': [1e-3, 1e-2, 1e-1, 0.5, 1.],
        'max_depth': range(1, 11),
        'min_samples_split': range(2, 21),
        'min_samples_leaf': range(1, 21),
        'subsample': np.arange(0.05, 1.01, 0.05),
        'max_features': np.arange(0.05, 1.01, 0.05)
    },

    'sklearn.neighbors.KNeighborsClassifier': {
        'n_neighbors': range(1, 101),
        'weights': ["uniform", "distance"],
        'p': [1, 2]
    },

    'sklearn.svm.LinearSVC': {
        'penalty': ["l1", "l2"],
        'loss': ["hinge", "squared_hinge"],
        'dual': [True, False],
        'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
        'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.]
    },

    'sklearn.linear_model.LogisticRegression': {
        'penalty': ["l1", "l2"],
        'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.],
        'dual': [True, False]
    },

    'xgboost.XGBClassifier': {
        'n_estimators': [100],
        'max_depth': range(1, 11),
        'learning_rate': [1e-3, 1e-2, 1e-1, 0.5, 1.],
        'subsample': np.arange(0.05, 1.01, 0.05),
        'min_child_weight': range(1, 21),
        # single thread so xgboost does not compete with TPOT's own
        # process-level parallelism
        'nthread': [1]
    },

    # Preprocessors
    'sklearn.preprocessing.Binarizer': {
        'threshold': np.arange(0.0, 1.01, 0.05)
    },

    'sklearn.decomposition.FastICA': {
        'tol': np.arange(0.0, 1.01, 0.05)
    },

    'sklearn.cluster.FeatureAgglomeration': {
        'linkage': ['ward', 'complete', 'average'],
        'affinity': ['euclidean', 'l1', 'l2', 'manhattan', 'cosine', 'precomputed']
    },

    'sklearn.preprocessing.MaxAbsScaler': {
    },

    'sklearn.preprocessing.MinMaxScaler': {
    },

    'sklearn.preprocessing.Normalizer': {
        'norm': ['l1', 'l2', 'max']
    },

    'sklearn.kernel_approximation.Nystroem': {
        'kernel': ['rbf', 'cosine', 'chi2', 'laplacian', 'polynomial', 'poly', 'linear', 'additive_chi2', 'sigmoid'],
        'gamma': np.arange(0.0, 1.01, 0.05),
        'n_components': range(1, 11)
    },

    'sklearn.decomposition.PCA': {
        'svd_solver': ['randomized'],
        'iterated_power': range(1, 11)
    },

    'sklearn.preprocessing.PolynomialFeatures': {
        'degree': [2],
        'include_bias': [False],
        'interaction_only': [False]
    },

    'sklearn.kernel_approximation.RBFSampler': {
        'gamma': np.arange(0.0, 1.01, 0.05)
    },

    'sklearn.preprocessing.RobustScaler': {
    },

    'sklearn.preprocessing.StandardScaler': {
    },

    'tpot.builtins.ZeroCount': {
    },

    'tpot.builtins.OneHotEncoder': {
        'minimum_fraction': [0.05, 0.1, 0.15, 0.2, 0.25],
        'sparse': [False]
    },

    # Selectors
    'sklearn.feature_selection.SelectFwe': {
        'alpha': np.arange(0, 0.05, 0.001),
        'score_func': {
            'sklearn.feature_selection.f_classif': None
        }
    },

    'sklearn.feature_selection.SelectPercentile': {
        'percentile': range(1, 100),
        'score_func': {
            'sklearn.feature_selection.f_classif': None
        }
    },

    'sklearn.feature_selection.VarianceThreshold': {
        'threshold': np.arange(0.05, 1.01, 0.05)
    },

    'sklearn.feature_selection.RFE': {
        'step': np.arange(0.05, 1.01, 0.05),
        'estimator': {
            'sklearn.ensemble.ExtraTreesClassifier': {
                'n_estimators': [100],
                'criterion': ['gini', 'entropy'],
                'max_features': np.arange(0.05, 1.01, 0.05)
            }
        }
    },

    'sklearn.feature_selection.SelectFromModel': {
        'threshold': np.arange(0, 1.01, 0.05),
        'estimator': {
            'sklearn.ensemble.ExtraTreesClassifier': {
                'n_estimators': [100],
                'criterion': ['gini', 'entropy'],
                'max_features': np.arange(0.05, 1.01, 0.05)
            }
        }
    }

}
| lgpl-3.0 |
Nyker510/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
              action="store_true", dest="print_report",
              help="Print a detailed classification report.")
op.add_option("--chi2_select",
              action="store", type="int", dest="select_chi2",
              help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
              action="store_true", dest="print_cm",
              help="Print the confusion matrix.")
op.add_option("--top10",
              action="store_true", dest="print_top10",
              help="Print ten most discriminative terms per class"
                   " for every classifier.")
op.add_option("--all_categories",
              action="store_true", dest="all_categories",
              help="Whether to use all categories or not.")
op.add_option("--use_hashing",
              action="store_true",
              help="Use a hashing vectorizer.")
op.add_option("--n_features",
              action="store", type=int, default=2 ** 16,
              help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
              action="store_true",
              help="Remove newsgroup information that is easily overfit: "
                   "headers, signatures, and quoting.")

(opts, args) = op.parse_args()
# This script is flag-driven only; any positional argument is an error.
if len(args) > 0:
    op.error("this script takes no arguments.")
    sys.exit(1)

print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
    categories = None  # fetch_20newsgroups interprets None as "all"
else:
    categories = [
        'alt.atheism',
        'talk.religion.misc',
        'comp.graphics',
        'sci.space',
    ]

# Optionally strip metadata that makes the task artificially easy.
if opts.filtered:
    remove = ('headers', 'footers', 'quotes')
else:
    remove = ()

print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")

data_train = fetch_20newsgroups(subset='train', categories=categories,
                                shuffle=True, random_state=42,
                                remove=remove)

data_test = fetch_20newsgroups(subset='test', categories=categories,
                               shuffle=True, random_state=42,
                               remove=remove)
print('data loaded')

categories = data_train.target_names  # for case categories == None
def size_mb(docs):
    """Return the total UTF-8 encoded size of the documents in megabytes."""
    total_bytes = 0
    for doc in docs:
        total_bytes += len(doc.encode('utf-8'))
    return total_bytes / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)

print("%d documents - %0.3fMB (training set)" % (
    len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
    len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()

# split a training set and a test set
y_train, y_test = data_train.target, data_test.target

print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    # Stateless hashing: no vocabulary to fit, so transform() is enough.
    vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
                                   n_features=opts.n_features)
    X_train = vectorizer.transform(data_train.data)
else:
    # TF-IDF needs to learn the vocabulary/IDF weights from the train set.
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()

print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()

# mapping from integer feature name to original token string
if opts.use_hashing:
    feature_names = None  # hashed features have no token names
else:
    feature_names = vectorizer.get_feature_names()

if opts.select_chi2:
    # Univariate feature selection keeps only the k best chi2-scored terms.
    print("Extracting %d best features by a chi-squared test" %
          opts.select_chi2)
    t0 = time()
    ch2 = SelectKBest(chi2, k=opts.select_chi2)
    X_train = ch2.fit_transform(X_train, y_train)
    X_test = ch2.transform(X_test)
    if feature_names:
        # keep selected feature names
        feature_names = [feature_names[i] for i
                         in ch2.get_support(indices=True)]
    print("done in %fs" % (time() - t0))
    print()

if feature_names:
    feature_names = np.asarray(feature_names)
def trim(s):
    """Shorten a string with an ellipsis so it fits an 80-column terminal."""
    if len(s) <= 80:
        return s
    return s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
    """Fit ``clf`` on the module-level train split and score the test split.

    Prints timings and accuracy (plus the optional top-terms, report and
    confusion-matrix output controlled by ``opts``) and returns a tuple
    ``(classifier_name, accuracy, train_time, test_time)``.
    """
    print('_' * 80)
    print("Training: ")
    print(clf)
    t0 = time()
    clf.fit(X_train, y_train)
    train_time = time() - t0
    print("train time: %0.3fs" % train_time)

    t0 = time()
    pred = clf.predict(X_test)
    test_time = time() - t0
    print("test time: %0.3fs" % test_time)

    score = metrics.accuracy_score(y_test, pred)
    print("accuracy: %0.3f" % score)

    if hasattr(clf, 'coef_'):
        # Linear models expose per-class weight vectors.
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))

        if opts.print_top10 and feature_names is not None:
            print("top 10 keywords per class:")
            for i, category in enumerate(categories):
                # Highest-weighted features are the most discriminative.
                top10 = np.argsort(clf.coef_[i])[-10:]
                print(trim("%s: %s"
                      % (category, " ".join(feature_names[top10]))))
        print()

    if opts.print_report:
        print("classification report:")
        print(metrics.classification_report(y_test, pred,
                                            target_names=categories))

    if opts.print_cm:
        print("confusion matrix:")
        print(metrics.confusion_matrix(y_test, pred))

    print()
    # repr(clf) up to the first '(' is the class name.
    clf_descr = str(clf).split('(')[0]
    return clf_descr, score, train_time, test_time
results = []
# Benchmark a spread of linear, instance-based and ensemble classifiers.
for clf, name in (
        (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
        (Perceptron(n_iter=50), "Perceptron"),
        (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
        (KNeighborsClassifier(n_neighbors=10), "kNN"),
        (RandomForestClassifier(n_estimators=100), "Random forest")):
    print('=' * 80)
    print(name)
    results.append(benchmark(clf))
# Compare L2 vs. L1 regularization for the linear models.
for penalty in ["l2", "l1"]:
    print('=' * 80)
    print("%s penalty" % penalty.upper())
    # Train Liblinear model
    results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
                                       dual=False, tol=1e-3)))
    # Train SGD model
    results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
                                           penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
                                       penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
  ('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
  ('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
# Transpose list of 4-tuples into 4 parallel lists.
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
# Normalize timings to [0, 1] so all three bar series share one axis.
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
    plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
Atzingen/controleForno-interface | imagens/bind.py | 1 | 1237 | # -*- coding: latin-1 -*-
# NOTE: Python 2 script (print statements) — do not run under Python 3.
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Load temperature-profile image and oven photo.
perfil = cv2.imread('temperatura.jpg')
forno = cv2.imread('forno-pre.jpg')
col_perfil, lin_perfil, _ = perfil.shape
col_forno, lin_forno, _ = forno.shape
print 'perfil antes:', lin_perfil, col_perfil, 'forno:', lin_forno, col_forno
# Crop borders of the profile image, then resize it to the oven image size.
perfil = perfil[7:-25,27:]
b,g,r = cv2.split(perfil)       # get b,g,r
perfil = cv2.merge([r,g,b])     # switch it to rgb
perfil = cv2.resize(perfil,(lin_forno, col_forno))
col_perfil, lin_perfil, _ = perfil.shape
print 'perfil depois:', lin_perfil, col_perfil
lin = lin_perfil
col = col_perfil
# Map the full rectangle (pts1) onto the quadrilateral pts2 in the oven photo.
pts1 = np.float32([[0,0],[0,col],[lin,col],[lin,0]])
p1, p2, p3, p4 = [70,120], [320,200], [780,55], [600,20]
pts2 = np.float32([p1, p2, p3, p4])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst2 = cv2.warpPerspective(perfil,M,(lin,col))
dst = cv2.warpPerspective(perfil,M,(lin,col))
print dst.shape, forno.shape
# Blend warped profile (30%) over the oven photo (70%).
f = cv2.addWeighted(dst,0.3,forno,0.7,0)
plt.subplot(221),plt.imshow(perfil),plt.title('Input'), plt.colorbar()
plt.subplot(222),plt.imshow(dst),plt.title('Output')
plt.subplot(223),plt.imshow(f),plt.title('Final')
plt.subplot(224),plt.imshow(dst2),plt.title('rwa-pers')
plt.show()
# NOTE(review): duplicate plt.show() below — the second call is redundant.
plt.show()
| mit |
RichHelle/data-science-from-scratch | scratch/visualization.py | 3 | 4696 | from matplotlib import pyplot as plt
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
# plt.show()
plt.savefig('im/viz_gdp.png')
plt.gca().clear()
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# plot bars with left x-coordinates [0, 1, 2, 3, 4], heights [num_oscars]
plt.bar(range(len(movies)), num_oscars)
plt.title("My Favorite Movies")     # add a title
plt.ylabel("# of Academy Awards")   # label the y-axis
# label x-axis with movie names at bar centers
plt.xticks(range(len(movies)), movies)
# plt.show()
plt.savefig('im/viz_movies.png')
plt.gca().clear()
from collections import Counter
grades = [83, 95, 91, 87, 70, 0, 85, 82, 100, 67, 73, 77, 0]
# Bucket grades by decile, but put 100 in with the 90s
histogram = Counter(min(grade // 10 * 10, 90) for grade in grades)
plt.bar([x + 5 for x in histogram.keys()],  # Shift bars right by 5
        histogram.values(),                 # Give each bar its correct height
        10,                                 # Give each bar a width of 10
        edgecolor=(0, 0, 0))                # Black edges for each bar
plt.axis([-5, 105, 0, 5])                   # x-axis from -5 to 105,
                                            # y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)])     # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
# plt.show()
plt.savefig('im/viz_grades.png')
plt.gca().clear()
mentions = [500, 505]
years = [2017, 2018]
plt.bar(years, mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
# misleading y-axis only shows the part above 500
plt.axis([2016.5, 2018.5, 499, 506])
plt.title("Look at the 'Huge' Increase!")
# plt.show()
plt.savefig('im/viz_misleading_y_axis.png')
plt.gca().clear()
# Same data with the y-axis anchored at 0: the increase looks modest.
plt.bar(years, mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
plt.ticklabel_format(useOffset=False)
plt.axis([2016.5, 2018.5, 0, 550])
plt.title("Not So Huge Anymore")
# plt.show()
plt.savefig('im/viz_non_misleading_y_axis.png')
plt.gca().clear()
variance = [1, 2, 4, 8, 16, 32, 64, 128, 256]
bias_squared = [256, 128, 64, 32, 16, 8, 4, 2, 1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = [i for i, _ in enumerate(variance)]
# We can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance')  # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2')  # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error')  # blue dotted line
# Because we've assigned labels to each series,
# we can get a legend for free (loc=9 means "top center")
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.xticks([])
plt.title("The Bias-Variance Tradeoff")
# plt.show()
plt.savefig('im/viz_line_chart.png')
plt.gca().clear()
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
    plt.annotate(label,
                 xy=(friend_count, minute_count),  # Put the label with its point
                 xytext=(5, -5),                   # but slightly offset
                 textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
# plt.show()
plt.savefig('im/viz_scatterplot.png')
plt.gca().clear()
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.title("Axes Aren't Comparable")
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
# plt.show()
plt.savefig('im/viz_scatterplot_axes_not_comparable.png')
plt.gca().clear()
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.title("Axes Are Comparable")
# plt.axis("equal") makes both axes use the same scale.
plt.axis("equal")
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
plt.savefig('im/viz_scatterplot_axes_comparable.png')
plt.gca().clear()
| unlicense |
Winand/pandas | pandas/tests/io/parser/na_values.py | 6 | 10530 | # -*- coding: utf-8 -*-
"""
Tests that NA values are properly handled during
parsing for all of the parsers defined in parsers.py
"""
import numpy as np
from numpy import nan
import pandas.io.common as com
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, range
class NAvaluesTests(object):
    """
    Mixin of NA-handling parser tests.

    The mixing class is expected to provide ``read_csv`` and ``read_table``
    (this class never defines them), so the same cases run against each
    parser engine.
    """
    def test_string_nas(self):
        # Empty fields between delimiters must parse as NaN.
        data = """A,B,C
a,b,c
d,,f
,g,h
"""
        result = self.read_csv(StringIO(data))
        expected = DataFrame([['a', 'b', 'c'],
                              ['d', np.nan, 'f'],
                              [np.nan, 'g', 'h']],
                             columns=['A', 'B', 'C'])
        tm.assert_frame_equal(result, expected)
    def test_detect_string_na(self):
        # Default NA sentinels ('NA', 'NaN', 'nan') are recognized.
        data = """A,B
foo,bar
NA,baz
NaN,nan
"""
        expected = np.array([['foo', 'bar'], [nan, 'baz'], [nan, nan]],
                            dtype=np.object_)
        df = self.read_csv(StringIO(data))
        tm.assert_numpy_array_equal(df.values, expected)
    def test_non_string_na_values(self):
        # see gh-3611: with an odd float format, we can't match
        # the string '999.0' exactly but still need float matching
        nice = """A,B
-999,1.2
2,-999
3,4.5
"""
        ugly = """A,B
-999,1.200
2,-999.000
3,4.500
"""
        # Mixed string/float NA specifications must all behave identically.
        na_values_param = [['-999.0', '-999'],
                           [-999, -999.0],
                           [-999.0, -999],
                           ['-999.0'], ['-999'],
                           [-999.0], [-999]]
        expected = DataFrame([[np.nan, 1.2], [2.0, np.nan],
                              [3.0, 4.5]], columns=['A', 'B'])
        for data in (nice, ugly):
            for na_values in na_values_param:
                out = self.read_csv(StringIO(data), na_values=na_values)
                tm.assert_frame_equal(out, expected)
    def test_default_na_values(self):
        # The hard-coded set below must stay in sync with pandas' default.
        _NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
                          '#N/A', 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null',
                          'NaN', 'nan', '-NaN', '-nan', '#N/A N/A', ''])
        assert _NA_VALUES == com._NA_VALUES
        nv = len(_NA_VALUES)
        def f(i, v):
            # Build a csv row with value v in column i, empty elsewhere.
            if i == 0:
                buf = ''
            elif i > 0:
                buf = ''.join([','] * i)
            buf = "{0}{1}".format(buf, v)
            if i < nv - 1:
                buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
            return buf
        data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
        expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
        df = self.read_csv(data, header=None)
        tm.assert_frame_equal(df, expected)
    def test_custom_na_values(self):
        data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
        expected = np.array([[1., nan, 3],
                             [nan, 5, nan],
                             [7, 8, nan]])
        df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
        tm.assert_numpy_array_equal(df.values, expected)
        df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
                              skiprows=[1])
        tm.assert_numpy_array_equal(df2.values, expected)
        # Scalar na_values should behave like a one-element list.
        df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
                              skiprows=[1])
        tm.assert_numpy_array_equal(df3.values, expected)
    def test_bool_na_values(self):
        # NA in a boolean column forces object dtype with embedded NaN.
        data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
        result = self.read_csv(StringIO(data))
        expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
                              'B': np.array([False, True, nan], dtype=object),
                              'C': [True, False, True]})
        tm.assert_frame_equal(result, expected)
    def test_na_value_dict(self):
        # Per-column NA values via a dict keyed by column name.
        data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
        df = self.read_csv(StringIO(data),
                           na_values={'A': ['foo'], 'B': ['bar']})
        expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
                              'B': [np.nan, 'foo', np.nan, 'foo'],
                              'C': [np.nan, 'foo', np.nan, 'foo']})
        tm.assert_frame_equal(df, expected)
        data = """\
a,b,c,d
0,NA,1,5
"""
        xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
        xp.index.name = 'a'
        df = self.read_csv(StringIO(data), na_values={}, index_col=0)
        tm.assert_frame_equal(df, xp)
        xp = DataFrame({'b': [np.nan], 'd': [5]},
                       MultiIndex.from_tuples([(0, 1)]))
        xp.index.names = ['a', 'c']
        df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
        tm.assert_frame_equal(df, xp)
        xp = DataFrame({'b': [np.nan], 'd': [5]},
                       MultiIndex.from_tuples([(0, 1)]))
        xp.index.names = ['a', 'c']
        df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
        tm.assert_frame_equal(df, xp)
    def test_na_values_keep_default(self):
        data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
        df = self.read_csv(StringIO(data))
        xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
                        'Two': [1, 2, 3, 4, 5, 6, 7],
                        'Three': ['one', 'two', 'three', np.nan, 'five',
                                  np.nan, 'seven']})
        tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
        # keep_default_na=False disables the builtin sentinels entirely.
        df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
                           keep_default_na=False)
        xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
                        'Two': [1, 2, 3, 4, 5, 6, 7],
                        'Three': ['one', 'two', 'three', 'nan', 'five',
                                  '', 'seven']})
        tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
        df = self.read_csv(
            StringIO(data), na_values=['a'], keep_default_na=False)
        xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
                        'Two': [1, 2, 3, 4, 5, 6, 7],
                        'Three': ['one', 'two', 'three', 'nan', 'five', '',
                                  'seven']})
        tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
        df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
        xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
                        'Two': [1, 2, 3, 4, 5, 6, 7],
                        'Three': ['one', 'two', 'three', np.nan, 'five',
                                  np.nan, 'seven']})
        tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
        # see gh-4318: passing na_values=None and
        # keep_default_na=False yields 'None' as a na_value
        data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
        df = self.read_csv(
            StringIO(data), keep_default_na=False)
        xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
                        'Two': [1, 2, 3, 4, 5, 6, 7],
                        'Three': ['None', 'two', 'None', 'nan', 'five', '',
                                  'seven']})
        tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
    def test_na_values_na_filter_override(self):
        # na_filter=False turns NA detection off even with explicit na_values.
        data = """\
A,B
1,A
nan,B
3,C
"""
        expected = DataFrame([[1, 'A'], [np.nan, np.nan], [3, 'C']],
                             columns=['A', 'B'])
        out = self.read_csv(StringIO(data), na_values=['B'], na_filter=True)
        tm.assert_frame_equal(out, expected)
        expected = DataFrame([['1', 'A'], ['nan', 'B'], ['3', 'C']],
                             columns=['A', 'B'])
        out = self.read_csv(StringIO(data), na_values=['B'], na_filter=False)
        tm.assert_frame_equal(out, expected)
    def test_na_trailing_columns(self):
        # Rows shorter than the header get NaN in the trailing columns.
        data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
        result = self.read_csv(StringIO(data))
        assert result['Date'][1] == '2012-05-12'
        assert result['UnitPrice'].isna().all()
    def test_na_values_scalar(self):
        # see gh-12224
        names = ['a', 'b']
        data = '1,2\n2,1'
        expected = DataFrame([[np.nan, 2.0], [2.0, np.nan]],
                             columns=names)
        out = self.read_csv(StringIO(data), names=names, na_values=1)
        tm.assert_frame_equal(out, expected)
        expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]],
                             columns=names)
        out = self.read_csv(StringIO(data), names=names,
                            na_values={'a': 2, 'b': 1})
        tm.assert_frame_equal(out, expected)
    def test_na_values_dict_aliasing(self):
        # The parser must not mutate the caller's na_values dict.
        na_values = {'a': 2, 'b': 1}
        na_values_copy = na_values.copy()
        names = ['a', 'b']
        data = '1,2\n2,1'
        expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
        out = self.read_csv(StringIO(data), names=names, na_values=na_values)
        tm.assert_frame_equal(out, expected)
        tm.assert_dict_equal(na_values, na_values_copy)
    def test_na_values_dict_col_index(self):
        # see gh-14203
        data = 'a\nfoo\n1'
        na_values = {0: 'foo'}
        out = self.read_csv(StringIO(data), na_values=na_values)
        expected = DataFrame({'a': [np.nan, 1]})
        tm.assert_frame_equal(out, expected)
    def test_na_values_uint64(self):
        # see gh-14983
        na_values = [2**63]
        data = str(2**63) + '\n' + str(2**63 + 1)
        expected = DataFrame([str(2**63), str(2**63 + 1)])
        out = self.read_csv(StringIO(data), header=None, na_values=na_values)
        tm.assert_frame_equal(out, expected)
        data = str(2**63) + ',1' + '\n,2'
        expected = DataFrame([[str(2**63), 1], ['', 2]])
        out = self.read_csv(StringIO(data), header=None)
        tm.assert_frame_equal(out, expected)
    def test_empty_na_values_no_default_with_index(self):
        # see gh-15835
        data = "a,1\nb,2"
        expected = DataFrame({'1': [2]}, index=Index(["b"], name="a"))
        out = self.read_csv(StringIO(data), keep_default_na=False, index_col=0)
        tm.assert_frame_equal(out, expected)
| bsd-3-clause |
hvasbath/beat | beat/heart.py | 1 | 115730 | """
Core module with functions to calculate Greens Functions and synthetics.
Also contains main classes for setup specific parameters.
"""
import os
import logging
import shutil
import copy
from time import time
from collections import OrderedDict
from beat import psgrn, pscmp, utility, qseis2d
from theano import config as tconfig
from theano import shared
import numpy as num
from scipy import linalg
from pyrocko.guts import (Dict, Object, String, StringChoice,
Float, Int, Tuple, List, Bool)
from pyrocko.guts_array import Array
from pyrocko import crust2x2, gf, cake, orthodrome, trace, util
from pyrocko.cake import GradientLayer
from pyrocko.fomosto import qseis, qssp
from pyrocko.model import gnss
# from pyrocko.fomosto import qseis2d
# Module-level logger for this file.
logger = logging.getLogger('heart')
c = 299792458.  # speed of light [m/s]
km = 1000.  # meters per kilometer
d2r = num.pi / 180.  # degrees -> radians
r2d = 180. / num.pi  # radians -> degrees
near_field_threshold = 9.  # [deg] below that surface waves are calculated
nanostrain = 1e-9
# Radar carrier wavelengths per satellite mission — presumably in [m]
# (values ~0.056 match C-band); TODO confirm units against usage.
lambda_sensors = {
    'Envisat': 0.056,  # needs updating- no ressource file
    'ERS1': 0.05656461471698113,
    'ERS2': 0.056,  # needs updating
    'JERS': 0.23513133960784313,
    'RadarSat2': 0.055465772433}
def log_determinant(A, inverse=False):
    """
    Natural logarithm of the determinant of a symmetric positive-definite
    matrix, computed via its Cholesky factorization.

    Parameters
    ----------
    A : n x n :class:`numpy.ndarray`
        symmetric positive-definite matrix
    inverse : boolean
        If True, the log determinant of the inverse of ``A`` is returned
        (i.e. the Cholesky factor is inverted before summing):

        L * L.T = A        inverse=False
        L-1 * (L-1).T = A-1  inverse=True

    Returns
    -------
    float logarithm of the determinant of the input Matrix A
    """
    chol_factor = linalg.cholesky(A, lower=True)
    if inverse:
        chol_factor = num.linalg.inv(chol_factor)

    # det(A) = prod(diag(L))**2, hence log det = 2 * sum(log(diag(L)))
    return 2. * num.sum(num.log(num.diag(chol_factor)))
class ReferenceLocation(gf.Location):
    """
    Reference Location for Green's Function store calculations!
    """
    # Coordinates are inherited from the pyrocko ``gf.Location`` base class;
    # this subclass only adds the store-naming field below.
    station = String.T(
        default='Store_Name',
        help='This mimics the station.station attribute which determines the'
        ' store name!')
class Covariance(Object):
    """
    Covariance of an observation. Holds data and model prediction uncertainties
    for one observation object.
    """
    data = Array.T(
        shape=(None, None),
        dtype=tconfig.floatX,
        help='Data covariance matrix',
        optional=True)
    pred_g = Array.T(
        shape=(None, None),
        dtype=tconfig.floatX,
        help='Model prediction covariance matrix, fault geometry',
        optional=True)
    pred_v = Array.T(
        shape=(None, None),
        dtype=tconfig.floatX,
        help='Model prediction covariance matrix, velocity model',
        optional=True)
    def __init__(self, **kwargs):
        # Theano shared scalar holding the log-determinant normalisation,
        # so likelihood graphs pick up updates without recompilation.
        self.slog_pdet = shared(0., name='cov_normalisation', borrow=True)
        Object.__init__(self, **kwargs)
        self.update_slog_pdet()
    def covs_supported(self):
        # Names of the covariance attributes this class manages.
        return ['pred_g', 'pred_v', 'data']
    def check_matrix_init(self, cov_mat_str=''):
        """
        Check if matrix is initialised and if not set with zeros of size data.
        """
        if cov_mat_str not in self.covs_supported():
            raise NotImplementedError(
                'Covariance term %s not supported' % cov_mat_str)
        cov_mat = getattr(self, cov_mat_str)
        if cov_mat is None:
            cov_mat = num.zeros_like(self.data, dtype=tconfig.floatX)
        if cov_mat.size != self.data.size:
            # An all-zero matrix of wrong size is silently re-initialised;
            # a non-zero one of wrong size is a configuration error.
            if cov_mat.sum() == 0.:
                cov_mat = num.zeros_like(self.data, dtype=tconfig.floatX)
            else:
                raise ValueError(
                    '%s covariances defined but size '
                    'inconsistent!' % cov_mat_str)
        setattr(self, cov_mat_str, cov_mat)
    @property
    def p_total(self):
        # Sum of both model-prediction covariance terms (zeros if unset).
        self.check_matrix_init('pred_g')
        self.check_matrix_init('pred_v')
        return self.pred_g + self.pred_v
    @property
    def inverse(self):
        """
        Add and invert ALL uncertainty covariance Matrices.
        """
        Cx = self.p_total + self.data
        if Cx.sum() == 0:
            raise ValueError('No covariances given!')
        else:
            return num.linalg.inv(Cx).astype(tconfig.floatX)
    @property
    def inverse_p(self):
        """
        Add and invert different MODEL uncertainty covariance Matrices.
        """
        if self.p_total.sum() == 0:
            raise ValueError('No model covariance defined!')
        return num.linalg.inv(self.p_total).astype(tconfig.floatX)
    @property
    def inverse_d(self):
        """
        Invert DATA covariance Matrix.
        """
        if self.data is None:
            raise AttributeError('No data covariance matrix defined!')
        return num.linalg.inv(self.data).astype(tconfig.floatX)
    @property
    def chol(self):
        """
        Cholesky factor, of ALL uncertainty covariance matrices.
        """
        Cx = self.p_total + self.data
        if Cx.sum() == 0:
            raise ValueError('No covariances given!')
        else:
            return linalg.cholesky(Cx, lower=True).astype(tconfig.floatX)
    @property
    def chol_inverse(self):
        """
        Cholesky factor, upper right of the Inverse of the Covariance matrix of
        sum of ALL uncertainty covariance matrices.
        To be used as weight in the optimization.
        Notes
        -----
        Uses QR factorization on the inverse of the upper right Cholesky
        decomposed covariance matrix to obtain a proxy for the Cholesky
        decomposition of the inverse of the covariance matrix in case the
        inverse of the covariance matrix is not positive definite.
        From:
        https://math.stackexchange.com/questions/3838462/cholesky-factors-of-covariance-and-precision-matrix/3842840#3842840
        Returns
        -------
        upper triangle of the cholesky decomposition
        """
        try:
            return num.linalg.cholesky(
                self.inverse).T.astype(tconfig.floatX)
        except num.linalg.LinAlgError:
            # Fallback when the numerically-computed inverse is not PD.
            inverse_chol = num.linalg.inv(self.chol.T)
            _, chol_ur = num.linalg.qr(inverse_chol.T)
            return chol_ur.astype(tconfig.floatX)  # return upper right
    @property
    def log_pdet(self):
        """
        Calculate the log of the determinant of the total matrix.
        """
        # log det via Cholesky diagonal: det = prod(diag(L))**2
        ldet_x = num.log(num.diag(self.chol)).sum() * 2.
        return utility.scalar2floatX(ldet_x)
    def update_slog_pdet(self):
        """
        Update shared variable with current log_norm_factor (lnf)
        (for theano models).
        """
        self.slog_pdet.set_value(self.log_pdet)
        self.slog_pdet.astype(tconfig.floatX)
class ArrivalTaper(trace.Taper):
    """
    Cosine arrival Taper.

    The four markers a < b < c < d are offsets in seconds relative to the
    phase arrival time; fading happens in [a, b] and [c, d].
    """
    a = Float.T(default=-15.,
                help='start of fading in; [s] w.r.t. phase arrival')
    b = Float.T(default=-10.,
                help='end of fading in; [s] w.r.t. phase arrival')
    c = Float.T(default=50.,
                help='start of fading out; [s] w.r.t. phase arrival')
    d = Float.T(default=55.,
                help='end of fading out; [s] w.r.t phase arrival')
    def check_sample_rate_consistency(self, deltat):
        """
        Check if taper durations are consistent with GF sample rate.
        """
        # Both the inner (b-c) and outer (a-d) windows must span a whole
        # number of samples.
        for chop_b in (['b', 'c'], ['a', 'd']):
            duration = self.duration(chop_b)
            ratio = duration / deltat
            utility.error_not_whole(
                ratio,
                errstr='Taper duration %g of %s is inconsistent with'
                ' sampling rate of %g! Please adjust Taper values!' % (
                    duration, utility.list2string(chop_b), deltat))
    def duration(self, chop_bounds=['b', 'c']):
        # NOTE(review): mutable default arg is safe here (read-only use),
        # but a tuple would be cleaner.
        t0 = getattr(self, chop_bounds[0])
        t1 = getattr(self, chop_bounds[1])
        return t1 - t0
    def nsamples(self, sample_rate, chop_bounds=['b', 'c']):
        """
        Returns the number of samples a tapered trace would have given
        its sample rate and chop_bounds
        Parameters
        ----------
        sample_rate : float
        """
        return int(num.ceil(sample_rate * self.duration(chop_bounds)))
    @property
    def fadein(self):
        # Length [s] of the fade-in ramp.
        return self.b - self.a
    @property
    def fadeout(self):
        # Length [s] of the fade-out ramp.
        return self.d - self.c
    def get_pyrocko_taper(self, arrival_time):
        """
        Get pyrocko CosTaper object that may be applied to trace operations.
        Parameters
        ----------
        arrival_time : float
            [s] of the reference time around which the taper will be applied
        Returns
        -------
        :class:`pyrocko.trace.CosTaper`
        """
        if not self.a < self.b < self.c < self.d:
            raise ValueError('Taper values violate: a < b < c < d')
        return trace.CosTaper(
            arrival_time + self.a,
            arrival_time + self.b,
            arrival_time + self.c,
            arrival_time + self.d)
class Trace(Object):
    """Guts type placeholder for trace attributes (see ``SeismicResult``)."""
    pass
class FilterBase(Object):
    """Common base class for the filter configuration objects below."""
    pass
class Filter(FilterBase):
    """
    Filter object defining frequency range of traces after time-domain
    filtering.
    """
    lower_corner = Float.T(
        default=0.001,
        help='Lower corner frequency')
    upper_corner = Float.T(
        default=0.1,
        help='Upper corner frequency')
    order = Int.T(
        default=4,
        help='order of filter, the higher the steeper')
    stepwise = Bool.T(
        default=True,
        help='If set to true the bandpass filter is done it two'
        ' consecutive steps, first high-pass then low-pass.')
    def apply(self, trace):
        """
        Bandpass-filter the given trace between the configured corner
        frequencies, either in two steps (HP then LP) or in one bandpass.
        """
        # filter traces
        # stepwise
        if self.stepwise:
            logger.debug('Stepwise HP LP filtering')
            trace.highpass(
                corner=self.lower_corner,
                order=self.order,
                demean=True)
            trace.lowpass(
                corner=self.upper_corner,
                order=self.order,
                demean=False)
        else:
            logger.debug('Single BP filtering')
            trace.bandpass(
                corner_hp=self.lower_corner,
                corner_lp=self.upper_corner,
                order=self.order)
class BandstopFilter(FilterBase):
    """
    Filter object defining suppressed frequency range of traces after
    time-domain filtering.
    """
    lower_corner = Float.T(
        default=0.12,
        help='Lower corner frequency')
    upper_corner = Float.T(
        default=0.25,
        help='Upper corner frequency')
    order = Int.T(
        default=4,
        help='order of filter, the higher the steeper')
    def apply(self, trace):
        """Suppress the configured frequency band in the given trace."""
        logger.debug('single bandstop filtering')
        trace.bandstop(
            corner_hp=self.lower_corner,
            corner_lp=self.upper_corner,
            order=self.order, demean=False)
class FrequencyFilter(FilterBase):
    """
    Frequency-domain filter applying a flat transfer function within
    ``freqlimits`` after tapering the trace ends in the time domain.
    """
    freqlimits = Tuple.T(
        4, Float.T(),
        default=(0.005, 0.006, 166., 200.),
        help='Corner frequencies 4-tuple [Hz] for frequency domain filter.')
    tfade = Float.T(
        default=20.,
        help='Rise/fall time in seconds of taper applied in timedomain at both'
        ' ends of trace.')

    def apply(self, trace):
        """
        Filter ``trace`` in place, consistent with the ``apply`` methods of
        the other filter classes in this module.
        """
        # BUG FIX: ``trace.transfer`` returns a NEW trace; previously its
        # result was assigned to the local name ``trace`` and discarded,
        # making apply() a no-op for the caller. With cut_off_fading=False
        # the filtered trace keeps the original span/sampling, so copying
        # the samples back updates the caller's trace in place.
        filtered = trace.transfer(
            self.tfade, self.freqlimits, invert=False, cut_off_fading=False)
        trace.set_ydata(filtered.get_ydata())
class ResultPoint(Object):
    """
    Containing point in solution space.
    """
    post_llk = String.T(
        optional=True,
        help='describes which posterior likelihood value the point belongs to')
    # Mapping of parameter name -> sampled value array.
    point = Dict.T(
        String.T(),
        Array.T(
            serialize_as='list',
            dtype=tconfig.floatX),
        default={},
        help='Point in Solution space for which result is produced.')
    variance_reductions = Dict.T(
        String.T(),
        Float.T(),
        default={},
        optional=True,
        help='Variance reductions for each dataset.')
class SeismicResult(Object):
    """
    Result object assembling different traces of misfit.
    """
    point = ResultPoint.T(default=ResultPoint.D())
    processed_obs = Trace.T(optional=True)
    # processed_syn = Trace.T(optional=True)
    # processed_res = Trace.T(optional=True)
    arrival_taper = trace.Taper.T(optional=True)
    llk = Float.T(default=0., optional=True)
    taper = trace.Taper.T(optional=True)
    source_contributions = List.T(
        Trace.T(),
        help='synthetics of source individual contributions.')
    @property
    def processed_syn(self):
        """Full synthetic: sum of all per-source contribution traces."""
        # NOTE(review): if source_contributions is None or empty this raises
        # (UnboundLocalError / IndexError) — verify callers always set it.
        if self.source_contributions is not None:
            tr0 = copy.deepcopy(self.source_contributions[0])
            tr0.ydata = num.zeros_like(tr0.ydata)
            for tr in self.source_contributions:
                tr0.ydata += tr.ydata
        return tr0
    @property
    def processed_res(self):
        """Residual trace: observed minus synthetic (only if tmins agree)."""
        tr = copy.deepcopy(self.processed_obs)
        syn_tmin = self.processed_syn.tmin
        if tr.tmin == syn_tmin:
            tr.set_ydata(
                self.processed_obs.get_ydata() - self.processed_syn.get_ydata())
        else:
            # Misaligned traces: keep the observed data and warn, as a
            # sample-wise difference would be meaningless.
            logger.warning(
                'Observed and synthetic result traces for %s '
                'have different tmin values! '
                'obs: %g syn: %g, difference: %f. '
                'Residual may be invalid!' % (
                    utility.list2string(tr.nslc_id),
                    tr.tmin, syn_tmin, tr.tmin - syn_tmin))
        return tr
def results_for_export(results, datatype=None, attributes=None):
    """
    Generator over exportable result attributes.

    For each attribute name, yields a tuple of (list of the attribute values
    collected from all ``results``, attribute name). If ``attributes`` is not
    given it is derived from ``datatype``.
    """
    if attributes is None:
        if datatype is None:
            raise ValueError(
                'Either datatype or attributes need to be defined!')
        if datatype in ('geodetic', 'seismic'):
            attributes = ['processed_obs', 'processed_syn', 'processed_res']
        else:
            raise NotImplementedError(
                'datatype %s not implemented!' % datatype)

    for attr_name in attributes:
        try:
            collected = [getattr(res, attr_name) for res in results]
        except AttributeError:
            raise AttributeError(
                'Result object does not have the attribute '
                '"%s" to export!' % attr_name)

        yield collected, attr_name
sqrt2 = num.sqrt(2.)
# Hard physical limits per parameter name, used by Parameter.validate_bounds
# to reject user-configured priors outside meaningful ranges.
physical_bounds = dict(
    east_shift=(-500., 500.),
    north_shift=(-500., 500.),
    depth=(0., 1000.),
    strike=(-90., 420.),
    strike1=(-90., 420.),
    strike2=(-90., 420.),
    dip=(-45., 135.),
    dip1=(-45., 135.),
    dip2=(-45., 135.),
    rake=(-180., 270.),
    rake1=(-180., 270.),
    rake2=(-180., 270.),
    mix=(0, 1),
    diameter=(0., 100.),
    volume_change=(-1e12, 1e12),
    # Normalized moment-tensor components.
    mnn=(-sqrt2, sqrt2),
    mee=(-sqrt2, sqrt2),
    mdd=(-sqrt2, sqrt2),
    mne=(-1., 1.),
    mnd=(-1., 1.),
    med=(-1., 1.),
    exx=(-500., 500.),
    eyy=(-500., 500.),
    exy=(-500., 500.),
    rotation=(-500., 500.),
    w=(-3. / 8. * num.pi, 3. / 8. * num.pi),
    v=(-1. / 3, 1. / 3.),
    kappa=(0., 2 * num.pi),
    sigma=(-num.pi / 2., num.pi / 2.),
    h=(0., 1.),
    length=(0., 7000.),
    width=(0., 500.),
    slip=(0., 150.),
    nucleation_x=(-1., 1.),
    nucleation_y=(-1., 1.),
    opening_fraction=(-1., 1.),
    magnitude=(-5., 10.),
    time=(-300., 300.),
    time_shift=(-40., 40.),
    delta_time=(0., 100.),
    delta_depth=(0., 300.),
    distance=(0., 300.),
    duration=(0., 600.),
    peak_ratio=(0., 1.),
    durations=(0., 600.),
    uparr=(-1., 150.),
    uperp=(-150., 150.),
    utens=(-150., 150.),
    nucleation_strike=(0., num.inf),
    nucleation_dip=(0., num.inf),
    velocities=(0., 20.0),
    azimuth=(0, 360),
    amplitude=(1., 10e25),
    bl_azimuth=(0, 360),
    bl_amplitude=(0., 0.2),
    locking_depth=(0.1, 100.),
    hypers=(-20., 20.),
    ramp=(-0.01, 0.01),
    offset=(-1.0, 1.0),
    lat=(-90., 90.),
    lon=(-180., 180.),
    omega=(-10., 10.))
def list_repeat(arr, repeat=1):
    """
    Repeat entries of a 1d array.

    If ``repeat`` is an integer the whole array is tiled that many times.
    If it is a list, entry ``i`` of ``arr`` is repeated ``repeat[i]`` times
    and the pieces are concatenated; the list length must match ``arr.size``.
    """
    if not isinstance(repeat, list):
        return num.tile(arr, repeat)

    if len(repeat) != arr.size:
        raise ValueError(
            'Inconsistent requested dimensions! '
            'repeat: {}, {} array size'.format(repeat, arr.size))

    pieces = [num.repeat(value, count)
              for value, count in zip(arr, repeat)]
    return num.hstack(pieces)
class Parameter(Object):
    """
    Optimization parameter determines the bounds of the search space.
    """
    name = String.T(default='depth')
    form = String.T(default='Uniform',
                    help='Type of prior distribution to use. Options:'
                    ' "Uniform", ...')
    lower = Array.T(shape=(None,),
                    dtype=tconfig.floatX,
                    serialize_as='list',
                    default=num.array([0., 0.], dtype=tconfig.floatX))
    upper = Array.T(shape=(None,),
                    dtype=tconfig.floatX,
                    serialize_as='list',
                    default=num.array([1., 1.], dtype=tconfig.floatX))
    testvalue = Array.T(shape=(None,),
                        dtype=tconfig.floatX,
                        serialize_as='list',
                        default=num.array([0.5, 0.5], dtype=tconfig.floatX))

    def validate_bounds(self):
        """
        Check that lower/upper/testvalue are mutually consistent and lie
        within the physically meaningful ranges in ``physical_bounds``.

        Raises
        ------
        TypeError if the parameter name is not supported,
        ValueError if any bound or testvalue is inconsistent.
        """
        supported_vars = list(physical_bounds.keys())
        if self.name not in supported_vars:
            # Derived names: strip prefixes ('xyz_rake' -> 'rake'),
            # hyperparameters ('h_*') and per-station time shifts.
            candidate = self.name.split('_')[-1]
            if candidate in supported_vars:
                name = candidate
            elif self.name[0:2] == 'h_':
                name = 'hypers'
            elif self.name[0:11] == 'time_shifts':
                name = 'time_shift'
            else:
                raise TypeError(
                    'The parameter "%s" cannot'
                    ' be optimized for!' % self.name)
        else:
            name = self.name
        phys_b = physical_bounds[name]
        if self.lower is not None:
            for i in range(self.dimension):
                if self.upper[i] < self.lower[i]:
                    raise ValueError(
                        'The upper parameter bound for'
                        ' parameter "%s" must be higher than the lower'
                        ' bound' % self.name)
                if self.testvalue[i] > self.upper[i] or \
                        self.testvalue[i] < self.lower[i]:
                    raise ValueError(
                        'The testvalue of parameter "%s" at index "%i" has to'
                        ' be within the upper and lower bounds, please adjust!'
                        % (self.name, i))
                if self.upper[i] > phys_b[1] or \
                        self.lower[i] < phys_b[0]:
                    raise ValueError(
                        'The parameter bounds (%f, %f) for "%s" are outside of'
                        ' physically meaningful values (%f, %f)!' % (
                            self.lower[i], self.upper[i], self.name,
                            phys_b[0], phys_b[1]))
        else:
            raise ValueError(
                'Parameter bounds for "%s" have to be defined!' % self.name)

    def get_upper(self, repeat=1):
        """Upper bounds, repeated/tiled to the requested dimensions."""
        if self.upper.size != num.sum(repeat):
            return list_repeat(self.upper, repeat)
        else:
            return self.upper

    def get_lower(self, repeat=1):
        """Lower bounds, repeated/tiled to the requested dimensions."""
        if self.lower.size != num.sum(repeat):
            return list_repeat(self.lower, repeat)
        else:
            return self.lower

    def get_testvalue(self, repeat=1):
        """Test values, repeated/tiled to the requested dimensions."""
        if self.testvalue.size != num.sum(repeat):
            return list_repeat(self.testvalue, repeat)
        else:
            return self.testvalue

    def random(self, shape=None):
        """
        Create random samples within the parameter bounds.
        Parameters
        ----------
        shape : int or list
            of int number of draws from distribution
        Returns
        -------
        :class:`numpy.ndarray` of size (n, m)
        """
        if shape is None:
            shape = self.dimension
        lower = self.get_lower(shape)
        rands = num.random.rand(num.sum(shape))
        try:
            return (self.get_upper(shape) - lower) * rands + lower
        except ValueError:
            raise ValueError(
                'Value inconsistency shapes: {} parameter '
                'dimension {}'.format(shape, self.dimension))

    @property
    def dimension(self):
        """Number of entries in the bound arrays."""
        return self.lower.size

    def bound_to_array(self):
        """
        Return lower bounds, test values and upper bounds stacked into a
        single :class:`numpy.ndarray` of shape (3, dimension).
        """
        # BUG FIX: previously accessed the non-existent attribute
        # ``self.testval`` (the field is named ``testvalue``) which raised
        # AttributeError, and used ``num.float``, an alias removed in
        # numpy >= 1.24; ``float`` is the exact equivalent.
        return num.array([self.lower, self.testvalue, self.upper],
                         dtype=float)
class DynamicTarget(gf.Target):
    """Seismic target carrying an optional instrument response."""

    response = trace.PoleZeroResponse.T(default=None, optional=True)

    def update_response(self, magnification, damping, period):
        """
        Derive a displacement pole-zero response from the given instrument
        prototype parameters and store it on this target.
        """
        zeros, poles, constant = proto2zpk(
            magnification, damping, period, quantity='displacement')
        # b, a = zpk2tf(z, p, k)
        if not self.response:
            logger.debug('Initializing new response!')
            self.response = trace.PoleZeroResponse(
                zeros=zeros, poles=poles, constant=constant)
            return
        self.response.zeros = zeros
        self.response.poles = poles
        self.response.constant = constant

    def update_target_times(self, sources=None, taperer=None):
        """
        Update the target attributes tmin and tmax to do the stacking
        only in this interval. Adds twice taper fade in time to each taper
        side.

        Parameters
        ----------
        source : list
            containing :class:`pyrocko.gf.seismosizer.Source` Objects
        taperer : :class:`pyrocko.trace.CosTaper`
        """
        if sources is None or taperer is None:
            self.tmin = None
            self.tmax = None
            return
        # twice the fade-in duration as padding on either side
        pad = 2 * (taperer.b - taperer.a)
        self.tmin = taperer.a - pad
        self.tmax = taperer.d + pad
class SeismicDataset(trace.Trace):
    """
    Extension to :class:`pyrocko.trace.Trace` to have
    :class:`Covariance` as an attribute.
    """
    # name of the seismic phase/wave this trace belongs to
    wavename = None
    # :class:`Covariance` holding data / model prediction covariances
    covariance = None

    @property
    def samples(self):
        """
        Number of samples with quantified uncertainties; falls back to the
        full data length if the covariance holds no data.
        """
        if self.covariance.data is not None:
            return self.covariance.data.shape[0]
        else:
            # logger.warn is deprecated in favour of logger.warning
            logger.warning(
                'Dataset has no uncertainties! Return full data length!')
            return self.data_len()

    def set_wavename(self, wavename):
        self.wavename = wavename

    @property
    def typ(self):
        """Dataset identifier composed of wavename and channel."""
        return self.wavename + '_' + self.channel

    @classmethod
    def from_pyrocko_trace(cls, trace, **kwargs):
        """
        Alternate constructor copying header fields and data from a
        :class:`pyrocko.trace.Trace`.
        """
        d = dict(
            tmin=trace.tmin,
            tmax=trace.tmax,
            ydata=trace.ydata,
            station=trace.station,
            location=trace.location,
            channel=trace.channel,
            network=trace.network,
            deltat=trace.deltat)
        return cls(**d)

    def __getstate__(self):
        # extend Trace pickling with the wavename and covariance attributes
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.wavename, self.covariance)

    def __setstate__(self, state):
        self.network, self.station, self.location, self.channel, \
            self.tmin, self.tmax, self.deltat, self.mtime, \
            self.ydata, self.meta, self.wavename, self.covariance = state
        self._growbuffer = None
        self._update_ids()
class GeodeticDataset(gf.meta.MultiLocation):
    """
    Overall geodetic data set class
    """
    typ = String.T(
        default='SAR',
        help='Type of geodetic data, e.g. SAR, GNSS, ...')
    name = String.T(
        default='A',
        help='e.g. GNSS campaign name or InSAR satellite track ')

    def __init__(self, **kwargs):
        self.has_correction = False
        self.corrections = None
        super(GeodeticDataset, self).__init__(**kwargs)

    def get_corrections(self, hierarchicals, point=None):
        """
        Needs to be specified on inherited dataset classes.
        """
        raise NotImplementedError('Needs implementation in subclass')

    def setup_corrections(self, event, correction_configs):
        """
        Initialise geodetic dataset corrections such as Ramps or Euler Poles.
        """
        self.corrections = []
        self.update_local_coords(event)
        for number, corr_conf in enumerate(correction_configs):
            corr = corr_conf.init_correction()
            # skip corrections that are disabled or target other datasets
            if self.name not in corr_conf.dataset_names or \
                    not corr_conf.enabled:
                logger.info(
                    'Not correcting %s for %s' % (
                        self.name, corr_conf.feature))
                continue
            logger.info(
                'Setting up %s correction for %s' % (
                    corr_conf.feature, self.name))
            locx_name, locy_name = corr.get_required_coordinate_names()
            coords_x = getattr(self, locx_name)
            coords_y = getattr(self, locy_name)
            data_mask = self.get_data_mask(corr_conf)
            corr.setup_correction(
                locy=coords_y, locx=coords_x, los_vector=self.los_vector,
                data_mask=data_mask,
                dataset_name=self.name, number=number)
            self.corrections.append(corr)
            self.has_correction = True

    def update_local_coords(self, loc):
        """
        Calculate local coordinates with respect to given Location.

        Parameters
        ----------
        loc : :class:`pyrocko.gf.meta.Location`

        Returns
        -------
        :class:`numpy.ndarray` (n_points, 3)
        """
        self.north_shifts, self.east_shifts = orthodrome.latlon_to_ne_numpy(
            loc.lat, loc.lon, self.lats, self.lons)
        return self.north_shifts, self.east_shifts

    def get_distances_to_event(self, loc):
        """Horizontal distances of all dataset points to the given location."""
        norths, easts = orthodrome.latlon_to_ne_numpy(
            loc.lat, loc.lon, self.lats, self.lons)
        return num.sqrt(norths ** 2 + easts ** 2)

    @property
    def samples(self):
        """Number of data points, from lats or local coordinates."""
        for coords in (self.lats, self.north_shifts):
            if coords is not None:
                return coords.size
        raise ValueError('No coordinates defined!')
class GNSSCompoundComponent(GeodeticDataset):
    """
    Collecting many GNSS components and merging them into arrays.
    Make synthetics generation more efficient.
    """
    los_vector = Array.T(shape=(None, 3), dtype=num.float, optional=True)
    displacement = Array.T(shape=(None,), dtype=num.float, optional=True)
    component = String.T(
        default='east',
        help='direction of measurement, north/east/up')
    stations = List.T(gnss.GNSSStation.T(optional=True))
    covariance = Covariance.T(
        optional=True,
        help=':py:class:`Covariance` that holds data'
        'and model prediction covariance matrixes')
    odw = Array.T(
        shape=(None,),
        dtype=num.float,
        help='Overlapping data weights, additional weight factor to the'
        'dataset for overlaps with other datasets',
        optional=True)

    def __init__(self, **kwargs):
        # cache for station-code -> index mapping
        self._station2index = None
        super(GNSSCompoundComponent, self).__init__(**kwargs)

    def update_los_vector(self):
        """
        Set unit line-of-sight vectors according to the measured component.

        Returns
        -------
        :class:`numpy.ndarray` (n_points, 3)
        """
        if self.component == 'east':
            c = num.array([0, 1, 0])
        elif self.component == 'north':
            c = num.array([1, 0, 0])
        elif self.component == 'up':
            c = num.array([0, 0, 1])
        else:
            raise ValueError('Component %s not supported' % self.component)
        self.los_vector = num.tile(c, self.samples).reshape(self.samples, 3)
        if num.isnan(self.los_vector).any():
            raise ValueError(
                'There are Nan values in LOS vector for dataset: %s! '
                'Please check source of imported data!' % self.name)
        return self.los_vector

    def __str__(self):
        s = 'GNSS\n compound: \n'
        s += ' component: %s\n' % self.component
        if self.lats is not None:
            s += ' number of stations: %i\n' % self.samples
        return s

    def to_kite_scene(self, bins=(600, 600)):
        """
        Rasterize station displacements onto a regular grid and wrap them
        into a :class:`kite.scene.Scene`.
        """
        from kite.scene import Scene, SceneConfig
        from scipy.stats import binned_statistic_2d
        bin_disp, bin_lat, bin_lon, _ = binned_statistic_2d(
            self.lats, self.lons, self.displacement,
            statistic='mean', bins=bins)
        logger.debug('Setting up the Kite Scene')
        config = SceneConfig()
        config.frame.llLat = bin_lat.min()
        config.frame.llLon = bin_lon.min()
        config.frame.dE = bin_lon[1] - bin_lon[0]
        config.frame.dN = bin_lat[1] - bin_lat[0]
        config.frame.spacing = 'degree'
        config.meta.scene_title = '%s %s' % (self.name, self.component)
        config.meta.scene_id = self.name
        # derive kite's theta/phi angles from the first LOS vector
        los_vec = self.los_vector[0]
        theta_rad = num.arccos(los_vec[2])
        theta_bin = num.full_like(bin_disp, theta_rad * 180 / num.pi)
        theta_bin[num.isnan(bin_disp)] = num.nan
        if theta_rad == 0:
            phi_rad = 0.
        else:
            phi_rad = num.arcsin(los_vec[1] / num.sin(theta_rad))
        phi_bin = num.full_like(bin_disp, phi_rad * 180 / num.pi)
        phi_bin[num.isnan(theta_bin)] = num.nan
        scene = Scene(
            theta=theta_bin,
            phi=phi_bin,
            displacement=bin_disp,
            config=config)
        return scene

    def get_data_mask(self, corr_config):
        """
        Return indexes of stations excluded from the given correction,
        derived from the configured black- or whitelist.
        """
        s2idx = self.station_name_index_mapping()
        if len(corr_config.station_whitelist) > 0 and \
                len(corr_config.station_blacklist) > 0:
            raise ValueError('Either White or Blacklist can be defined!')
        station_blacklist_idxs = []
        if corr_config.station_blacklist:
            for code in corr_config.station_blacklist:
                try:
                    station_blacklist_idxs.append(s2idx[code])
                except KeyError:
                    logger.warning(
                        'Blacklisted station %s not in dataset,'
                        ' skipping ...' % code)
        elif corr_config.station_whitelist:
            # whitelist inverted: everything not whitelisted is excluded
            for station_name in s2idx.keys():
                if station_name not in corr_config.station_whitelist:
                    station_blacklist_idxs.append(s2idx[station_name])
        logger.info(
            'Stations with idxs %s got blacklisted!' %
            utility.list2string(station_blacklist_idxs))
        return num.array(station_blacklist_idxs)

    def station_name_index_mapping(self):
        """Lazily build and cache the station-code -> index mapping."""
        if self._station2index is None:
            self._station2index = dict(
                (station.code, i) for (i, station) in enumerate(
                    self.stations))
        return self._station2index

    @classmethod
    def from_pyrocko_gnss_campaign(
            cls, campaign, components=('north', 'east', 'up')):
        """
        Alternate constructor building one compound per requested component
        from a pyrocko GNSS campaign object.

        Note: the default for "components" is a tuple to avoid the
        mutable-default-argument pitfall; any sequence of str is accepted.
        """
        valid_components = ['north', 'east', 'up']
        compounds = []
        for comp in components:
            logger.info('Loading "%s" GNSS component' % comp)
            if comp not in valid_components:
                raise ValueError(
                    'Component: %s not available! '
                    'Valid GNSS components are: %s' % (
                        comp, utility.list2string(valid_components)))
            comp_stations = []
            # renamed from "components", which shadowed the argument
            comp_series = []
            for st in campaign.stations:
                try:
                    comp_series.append(st.components[comp])
                    comp_stations.append(st)
                except KeyError:
                    # Fixed typo: was "logger.warngin", which raised
                    # AttributeError whenever a station missed a component.
                    logger.warning(
                        'No data for GNSS station: {}'.format(st.code))
            lats, lons = num.array(
                [loc.effective_latlon for loc in comp_stations]).T
            vs = num.array([c.shift for c in comp_series])
            variances = num.power(
                num.array([c.sigma for c in comp_series]), 2)
            compounds.append(cls(
                name=campaign.name,
                typ='GNSS',
                stations=comp_stations,
                displacement=vs,
                covariance=Covariance(data=num.eye(lats.size) * variances),
                lats=lats,
                lons=lons,
                east_shifts=num.zeros_like(lats),
                north_shifts=num.zeros_like(lats),
                component=comp,
                # Fixed: was num.ones_like(lats.size), which yields a 0-d
                # scalar; odw needs one weight per station (shape (n,)).
                odw=num.ones_like(lats)))
        return compounds
class ResultReport(Object):
    """
    Container summarising a sampling result: the selected solution point
    and the likelihood statistic it was chosen by.
    """
    solution_point = Dict.T(help='result point')
    post_llk = StringChoice.T(
        choices=['max', 'mean', 'min'],
        default='max',
        help='Value of point of the likelihood distribution.')
    mean_point = Dict.T(
        optional=True,
        default=None,
        help='mean of distributions, used for model'
        ' prediction covariance calculation.')
class IFG(GeodeticDataset):
    """
    Interferogram class as a dataset in the optimization.
    """
    master = String.T(optional=True,
                      help='Acquisition time of master image YYYY-MM-DD')
    slave = String.T(optional=True,
                     help='Acquisition time of slave image YYYY-MM-DD')
    amplitude = Array.T(shape=(None,), dtype=num.float, optional=True)
    wrapped_phase = Array.T(shape=(None,), dtype=num.float, optional=True)
    incidence = Array.T(shape=(None,), dtype=num.float, optional=True)
    heading = Array.T(shape=(None,), dtype=num.float, optional=True)
    los_vector = Array.T(shape=(None, 3), dtype=num.float, optional=True)
    satellite = String.T(default='Envisat')

    def __str__(self):
        parts = [
            'IFG\n Acquisition Track: %s\n' % self.name,
            ' timerange: %s - %s\n' % (self.master, self.slave)]
        if self.lats is not None:
            parts.append(' number of pixels: %i\n' % self.samples)
        return ''.join(parts)

    @property
    def wavelength(self):
        """Wavelength of the sensing satellite (looked up in lambda_sensors)."""
        return lambda_sensors[self.satellite]

    def update_los_vector(self, force=False):
        """
        Calculate LOS vector for given attributes incidence and heading angles.

        Returns
        -------
        :class:`numpy.ndarray` (n_points, 3)
        """
        # reuse a previously computed LOS vector unless forced
        if self.los_vector is not None and not force:
            return self.los_vector
        if self.incidence is None and self.heading is None:
            raise AttributeError(
                'Incidence and Heading need to be provided!')
        sin_inc = num.sin(num.deg2rad(self.incidence))
        Su = num.cos(num.deg2rad(self.incidence))
        Sn = - sin_inc * num.cos(num.deg2rad(self.heading - 270))
        Se = - sin_inc * num.sin(num.deg2rad(self.heading - 270))
        self.los_vector = num.array([Sn, Se, Su], dtype=num.float).T
        if num.isnan(self.los_vector).any():
            raise ValueError(
                'There are Nan values in LOS vector for dataset: %s! '
                'Please check source of imported data!' % self.name)
        return self.los_vector
class DiffIFG(IFG):
    """
    Differential Interferogram class as geodetic target for the calculation
    of synthetics and container for SAR data.
    """
    unwrapped_phase = Array.T(shape=(None,), dtype=num.float, optional=True)
    coherence = Array.T(shape=(None,), dtype=num.float, optional=True)
    reference_point = Tuple.T(2, Float.T(), optional=True)
    reference_value = Float.T(optional=True, default=0.0)
    displacement = Array.T(shape=(None,), dtype=num.float, optional=True)
    covariance = Covariance.T(
        optional=True,
        help=':py:class:`Covariance` that holds data'
        'and model prediction covariance matrixes')
    odw = Array.T(
        shape=(None,),
        dtype=num.float,
        help='Overlapping data weights, additional weight factor to the'
        'dataset for overlaps with other datasets',
        optional=True)
    mask = Array.T(
        shape=(None,),
        dtype=num.bool,
        help='Mask values for Euler pole region determination. '
        'Click polygon mask in kite!',
        optional=True)
    def export_to_csv(self, filename, displacement=None):
        """
        Write one CSV line per data point: position, look angles and
        displacement. Falls back to self.displacement if none is given.
        """
        logger.debug('Exporting dataset as csv to %s', filename)
        if displacement is None:
            displacement = self.displacement
        with open(filename, mode='w') as f:
            f.write(
                'lat[deg], lon[deg], incidence[deg], heading[deg], '
                'displacement[m]\n')
            for lat, lon, inci, head, dis in zip(
                    self.lats, self.lons,
                    self.incidence, self.heading, displacement):
                f.write(
                    '{}, {}, {}, {}, {} \n'.format(
                        lat, lon, inci, head, dis))
    @classmethod
    def from_kite_scene(cls, scene, **kwargs):
        """
        Build a DiffIFG from a kite scene quadtree: leaf displacements,
        look angles, full covariance and an optional polygon mask used for
        the Euler Pole correction.

        NOTE(review): **kwargs is accepted but never used below — verify
        whether it should be forwarded to cls(**d, **kwargs).
        """
        name = os.path.basename(scene.meta.filename)
        logger.info(
            'Attempting to access the full covariance matrix of the kite'
            ' scene %s. If this is not precalculated it will be calculated '
            'now, which may take a significant amount of time...' % name)
        covariance = Covariance(data=scene.covariance.covariance_matrix)
        if scene.quadtree.frame.isDegree():
            # leaf offsets are in degrees relative to the lower-left corner
            lats = num.empty(scene.quadtree.nleaves)
            lons = num.empty(scene.quadtree.nleaves)
            lats.fill(scene.quadtree.frame.llLat)
            lons.fill(scene.quadtree.frame.llLon)
            lons += scene.quadtree.leaf_eastings
            lats += scene.quadtree.leaf_northings
        elif scene.quadtree.frame.isMeter():
            loce = scene.quadtree.leaf_eastings
            locn = scene.quadtree.leaf_northings
            lats, lons = orthodrome.ne_to_latlon(
                lat0=scene.frame.llLat, lon0=scene.frame.llLon,
                north_m=locn, east_m=loce)
        # NOTE(review): if the frame is neither degree nor meter, lats/lons
        # remain undefined and the dict below raises NameError — confirm
        # kite guarantees one of the two.
        if hasattr(scene, 'polygon_mask'):
            polygons = scene.polygon_mask.polygons
        else:
            polygons = None
        mask = num.full(lats.size, False)
        if polygons:
            logger.info('Found polygon mask in %s! Importing for Euler Pole'
                        ' correction ...' % name)
            from matplotlib.path import Path
            # leaf positions in (col, row) pixel index space of the frame
            leaf_idxs_rows = scene.quadtree.leaf_northings / scene.frame.dN
            leaf_idxs_cols = scene.quadtree.leaf_eastings / scene.frame.dE
            points = num.vstack([leaf_idxs_cols, leaf_idxs_rows]).T
            for vertices in polygons.values():  # vertexes [cols, rows]
                p = Path(vertices)
                mask |= p.contains_points(points)
        else:
            logger.info('No polygon mask in %s!' % name)
        d = dict(
            name=name,
            displacement=scene.quadtree.leaf_means,
            lons=lons,
            lats=lats,
            covariance=covariance,
            incidence=90 - num.rad2deg(scene.quadtree.leaf_thetas),
            heading=-num.rad2deg(scene.quadtree.leaf_phis) + 180,
            odw=num.ones_like(scene.quadtree.leaf_phis),
            mask=mask)
        return cls(**d)
    def get_data_mask(self, corr_conf):
        """
        Return the polygon mask for the 'Euler Pole' correction feature;
        None for all other corrections.
        """
        if corr_conf.feature == 'Euler Pole':
            logger.info('Masking data for Euler Pole estimation!')
            return self.mask
        else:
            return None
class GeodeticResult(Object):
    """
    Result object assembling different geodetic data.
    """
    # solution-space point the processed arrays were computed at
    point = ResultPoint.T(default=ResultPoint.D())
    # processed observed / synthetic / residual values for this dataset
    processed_obs = Array.T(shape=(None,), dtype=num.float, optional=True)
    processed_syn = Array.T(shape=(None,), dtype=num.float, optional=True)
    processed_res = Array.T(shape=(None,), dtype=num.float, optional=True)
    # log-likelihood value associated with this result
    llk = Float.T(default=0., optional=True)
def init_seismic_targets(
        stations, earth_model_name='ak135-f-average.m', channels=('T', 'Z'),
        sample_rate=1.0, crust_inds=(0,), interpolation='multilinear',
        reference_location=None):
    """
    Initiate a list of target objects given a list of indexes to the
    respective GF store velocity model variation index (crust_inds).

    Parameters
    ----------
    stations : List of :class:`pyrocko.model.Station`
        List of station objects for which the targets are being initialised
    earth_model_name : str
        Name of the earth model that has been used for GF calculation.
    channels : sequence of str
        Components of the traces to be optimized for if rotated:
        T - transversal, Z - vertical, R - radial
        If not rotated:
        E - East, N- North, U - Up (Vertical)
    sample_rate : scalar, float
        sample rate [Hz] of the Greens Functions to use
    crust_inds : sequence of int
        Indexes of different velocity model realisations, 0 - reference model
    interpolation : str
        Method of interpolation for the Greens Functions, can be 'multilinear'
        or 'nearest_neighbor'
    reference_location : :class:`ReferenceLocation` or
        :class:`pyrocko.model.Station`
        if given, targets are initialised with this reference location

    Returns
    -------
    List of :class:`DynamicTarget`
    """
    # Note: channels/crust_inds defaults are tuples now to avoid the
    # mutable-default-argument pitfall; any sequence is accepted.
    if reference_location is None:
        # each station gets its own store prefix
        store_prefixes = [
            copy.deepcopy(station.station) for station in stations]
    else:
        # all stations share the reference location's store prefix
        store_prefixes = [
            copy.deepcopy(reference_location.station)
            for station in stations]
    em_name = get_earth_model_prefix(earth_model_name)
    targets = []
    for sta_num, station in enumerate(stations):
        for channel in channels:
            for crust_ind in crust_inds:
                cha = station.get_channel(channel)
                if cha is None:
                    # skip channels the station does not provide
                    logger.warning(
                        'Channel "%s" for station "%s" does not exist!'
                        % (channel, station.station))
                else:
                    targets.append(DynamicTarget(
                        quantity='displacement',
                        codes=(station.network,
                               station.station,
                               '%i' % crust_ind, channel),  # n, s, l, c
                        lat=station.lat,
                        lon=station.lon,
                        azimuth=cha.azimuth,
                        dip=cha.dip,
                        interpolation=interpolation,
                        store_id=get_store_id(
                            store_prefixes[sta_num],
                            em_name,
                            sample_rate,
                            crust_ind)))
    return targets
def get_store_id(prefix, earth_model_name, sample_rate, crust_ind=0):
    """Assemble the GF store id string from its naming components."""
    return '{}_{}_{:.3f}Hz_{}'.format(
        prefix, earth_model_name, sample_rate, crust_ind)
def init_geodetic_targets(
        datasets, earth_model_name='ak135-f-average.m',
        interpolation='nearest_neighbor', crust_inds=(0,),
        sample_rate=0.0):
    """
    Initiate a list of Static target objects given a list of indexes to the
    respective GF store velocity model variation index (crust_inds).

    Parameters
    ----------
    datasets : list
        of :class:`heart.GeodeticDataset` for which the targets are being
        initialised
    earth_model_name : str
        Name of the earth model that has been used for GF calculation.
    interpolation : str
        Method of interpolation for the Greens Functions, can be 'multilinear'
        or 'nearest_neighbor'
    crust_inds : sequence of int
        Indexes of different velocity model realisations, 0 - reference model
    sample_rate : scalar, float
        sample rate [Hz] of the Greens Functions to use

    Returns
    -------
    List of :class:`pyrocko.gf.targets.StaticTarget`
    """
    # Note: crust_inds default is a tuple now to avoid the
    # mutable-default-argument pitfall; any sequence of int is accepted.
    em_name = get_earth_model_prefix(earth_model_name)
    targets = [gf.StaticTarget(
        lons=d.lons,
        lats=d.lats,
        interpolation=interpolation,
        quantity='displacement',
        store_id=get_store_id('statics', em_name, sample_rate, crust_ind))
        for crust_ind in crust_inds for d in datasets]
    return targets
def vary_model(
        earthmod, error_depth=0.1, error_velocities=0.1,
        depth_limit_variation=600 * km):
    """
    Vary depths and velocities in the given source model by Gaussians with
    given 2-sigma errors [percent]. Ensures increasing velocity with depth.
    Stops variating the input model at the given depth_limit_variation [m].
    Mantle discontinuity uncertainties are hardcoded based on
    Mooney et al. 1981 and Woodward et al. 1991

    Parameters
    ----------
    earthmod : :class:`pyrocko.cake.LayeredModel`
        Earthmodel defining layers, depth, velocities, densities
    error_depth : scalar, float
        2 sigma error in percent of the depth for the respective layers
    error_velocities : scalar, float
        2 sigma error in percent of the velocities for the respective layers
    depth_limit_variation : scalar, float
        depth threshold [m], layers with depth > than this are not varied

    Returns
    -------
    Varied Earthmodel : :class:`pyrocko.cake.LayeredModel`
    Cost : int
        Counts repetitions of cycles to ensure increasing layer velocity,
        unlikely velocities have high Cost
        Cost of up to 20 are ok for crustal profiles.
    """
    new_earthmod = copy.deepcopy(earthmod)
    layers = new_earthmod.layers()
    last_l = None
    cost = 0
    deltaz = 0
    # uncertainties in discontinuity depth after Shearer 1991
    discont_unc = {
        '410': 3 * km,
        '520': 4 * km,
        '660': 8 * km}
    # uncertainties in velocity for upper and lower mantle from Woodward 1991
    # and Mooney 1989
    mantle_vel_unc = {
        '100': 0.05,    # above 100
        '200': 0.03,    # above 200
        '400': 0.01}    # above 400
    for layer in layers:
        # stop if depth_limit_variation is reached
        if depth_limit_variation:
            if layer.ztop >= depth_limit_variation:
                layer.ztop = last_l.zbot
                # assign large cost if previous layer has higher velocity
                if layer.mtop.vp < last_l.mtop.vp or \
                        layer.mtop.vp > layer.mbot.vp:
                    cost = 1000
                # assign large cost if layer bottom depth smaller than top
                if layer.zbot < layer.ztop:
                    cost = 1000
                break
        repeat = 1
        count = 0
        while repeat:
            # bail out of the retry cycle after too many rejected draws
            if count > 1000:
                break
            # vary layer velocity
            # check for layer depth and use hardcoded uncertainties
            for l_depth, vel_unc in mantle_vel_unc.items():
                if float(l_depth) * km < layer.ztop:
                    error_velocities = vel_unc
            logger.debug('Velocity error: %f ', error_velocities)
            deltavp = float(
                num.random.normal(
                    0, layer.mtop.vp * error_velocities / 3., 1))
            if layer.ztop == 0:
                layer.mtop.vp += deltavp
                layer.mbot.vs += (deltavp / layer.mbot.vp_vs_ratio())
            # ensure increasing velocity with depth
            if last_l:
                # gradient layer without interface
                if layer.mtop.vp == last_l.mbot.vp:
                    if layer.mbot.vp + deltavp < layer.mtop.vp:
                        count += 1
                    else:
                        layer.mbot.vp += deltavp
                        layer.mbot.vs += (
                            deltavp / layer.mbot.vp_vs_ratio())
                        repeat = 0
                        cost += count
                elif layer.mtop.vp + deltavp / 10 < last_l.mbot.vp:
                    count += 1
                else:
                    layer.mtop.vp += deltavp
                    layer.mtop.vs += (deltavp / layer.mtop.vp_vs_ratio())
                    if isinstance(layer, GradientLayer):
                        layer.mbot.vp += deltavp
                        layer.mbot.vs += (deltavp / layer.mbot.vp_vs_ratio())
                    repeat = 0
                    cost += count
            else:
                repeat = 0
            # vary layer depth
            # NOTE(review): this depth-variation block is nested INSIDE the
            # velocity retry loop; once the inner depth loop below sets
            # repeat=0 the outer loop exits even when the velocity draw was
            # rejected — confirm this nesting is intended.
            layer.ztop += deltaz
            repeat = 1
            # use hard coded uncertainties for mantle discontinuities
            if '%i' % (layer.zbot / km) in discont_unc:
                factor_d = discont_unc['%i' % (layer.zbot / km)] / layer.zbot
            else:
                factor_d = error_depth
            while repeat:
                # ensure that bottom of layer is not shallower than the top
                deltaz = float(
                    num.random.normal(
                        0, layer.zbot * factor_d / 3., 1))  # 3 sigma
                layer.zbot += deltaz
                if layer.zbot < layer.ztop:
                    layer.zbot -= deltaz
                    count += 1
                else:
                    repeat = 0
                # NOTE(review): cost accumulates count on EVERY depth
                # iteration here, not once after the loop — verify.
                cost += count
            last_l = copy.deepcopy(layer)
    return new_earthmod, cost
def ensemble_earthmodel(ref_earthmod, num_vary=10, error_depth=0.1,
                        error_velocities=0.1, depth_limit_variation=600 * km):
    """
    Create ensemble of earthmodels that vary around a given input earth model
    by a Gaussian of 2 sigma (in Percent 0.1 = 10%) for the depth layers
    and for the p and s wave velocities. Vp / Vs is kept unchanged

    Parameters
    ----------
    ref_earthmod : :class:`pyrocko.cake.LayeredModel`
        Reference earthmodel defining layers, depth, velocities, densities
    num_vary : scalar, int
        Number of variation realisations
    error_depth : scalar, float
        3 sigma error in percent of the depth for the respective layers
    error_velocities : scalar, float
        3 sigma error in percent of the velocities for the respective layers
    depth_limit_variation : scalar, float
        depth threshold [m], layers with depth > than this are not varied

    Returns
    -------
    List of Varied Earthmodels :class:`pyrocko.cake.LayeredModel`
    """
    earthmods = []
    # keep drawing perturbed models until num_vary plausible realisations
    # remain; candidates with cost > 20 are considered unlikely and dropped
    while len(earthmods) < num_vary:
        candidate, cost = vary_model(
            ref_earthmod,
            error_depth,
            error_velocities,
            depth_limit_variation)
        if cost > 20:
            logger.debug('Skipped unlikely model %f' % cost)
            continue
        earthmods.append(candidate)
    return earthmods
def get_velocity_model(
        location, earth_model_name, crust_ind=0, gf_config=None,
        custom_velocity_model=None):
    """
    Get velocity model at the specified location, combines given or crustal
    models with the global model.

    Parameters
    ----------
    location : :class:`pyrocko.meta.Location`
    earth_model_name : str
        Name of the base earth model to be used, check
        :func:`pyrocko.cake.builtin_models` for alternatives,
        default ak135 with medium resolution
    crust_ind : int
        Index to set to the Greens Function store, 0 is reference store
        indexes > 0 use reference model and vary its parameters by a Gaussian
    gf_config : :class:`beat.config.GFConfig`
    custom_velocity_model : :class:`pyrocko.cake.LayeredModel`

    Returns
    -------
    :class:`pyrocko.cake.LayeredModel`
    """
    gfc = gf_config
    avail_models = cake.builtin_models()
    if earth_model_name not in avail_models and earth_model_name != 'local':
        raise NotImplementedError(
            'Earthmodel name "%s" not available!'
            ' Implemented models: %s' % (
                earth_model_name, utility.list2string(avail_models)))
    if custom_velocity_model is not None:
        logger.info('Using custom model from config file')
        if earth_model_name == 'local':
            logger.info(
                'Using only custom velocity model, not pasting into '
                'global background model.')
            source_model = custom_velocity_model
        else:
            # paste the custom (local) model on top of the global model
            global_model = cake.load_model(earth_model_name)
            source_model = utility.join_models(
                global_model, custom_velocity_model)
    elif gfc.use_crust2:
        # NOTE(review): gfc is None here if gf_config was not passed, which
        # raises AttributeError — confirm callers always provide gf_config
        # when no custom model is given.
        logger.info('Using crust2 profile')
        # load velocity profile from CRUST2x2 and check for water layer
        profile = crust2x2.get_profile(location.lat, location.lon)
        if gfc.replace_water:
            logger.debug('Replacing water layers! ...')
            thickness_lwater = profile.get_layer(crust2x2.LWATER)[0]
            if thickness_lwater > 0.0:
                logger.info(
                    'Water layer %f in CRUST model!'
                    ' Remove and add to lower crust' % thickness_lwater)
                thickness_llowercrust = profile.get_layer(
                    crust2x2.LLOWERCRUST)[0]
                thickness_lsoftsed = profile.get_layer(
                    crust2x2.LSOFTSED)[0]
                # redistribute: thin soft sediments to a third and move the
                # water plus removed sediment thickness into the lower crust
                profile.set_layer_thickness(crust2x2.LWATER, 0.0)
                profile.set_layer_thickness(
                    crust2x2.LSOFTSED,
                    num.ceil(thickness_lsoftsed / 3))
                profile.set_layer_thickness(
                    crust2x2.LLOWERCRUST,
                    thickness_llowercrust +
                    thickness_lwater +
                    (thickness_lsoftsed - num.ceil(thickness_lsoftsed / 3)))
                profile._elevation = 0.0
                logger.info('New Lower crust layer thickness %f' %
                            profile.get_layer(crust2x2.LLOWERCRUST)[0])
        else:
            logger.debug('Not replacing water layers')
        source_model = cake.load_model(
            earth_model_name, crust2_profile=profile)
    else:
        logger.info('Using global model ...')
        source_model = cake.load_model(earth_model_name)
    if crust_ind > 0:
        # perturb the reference model once for non-reference store indexes
        source_model = ensemble_earthmodel(
            source_model,
            num_vary=1,
            error_depth=gfc.error_depth,
            error_velocities=gfc.error_velocities,
            depth_limit_variation=gfc.depth_limit_variation * km)[0]
    return source_model
def get_slowness_taper(fomosto_config, velocity_model, distances):
    """
    Calculate slowness taper for backends that determine wavefield based
    on the velociy model.

    Parameters
    ----------
    fomosto_config : :class:`pyrocko.meta.Config`
    velocity_model : :class:`pyrocko.cake.LayeredModel`
    distances : tuple
        minimum and maximum distance [deg]

    Returns
    -------
    tuple of slownesses
    """
    fc = fomosto_config
    # collect all cake phase definitions from the tabulated phases
    all_phases = []
    for tab_phase in fc.tabulated_phases:
        all_phases.extend(tab_phase.phases)
    mean_source_depth = num.mean(
        (fc.source_depth_min, fc.source_depth_max)) / km
    dists = num.linspace(distances[0], distances[1], 100)
    arrivals = velocity_model.arrivals(
        phases=all_phases,
        distances=dists,
        zstart=mean_source_depth)
    # ray parameters of all arrivals converted to slowness [s/km]
    ps = num.array([arrival.p for arrival in arrivals])
    slownesses = ps / (cake.r2d * cake.d2m / km)
    smax = float(slownesses.max())
    return (0.0, 0.0, 1.1 * smax, 1.3 * smax)
def get_earth_model_prefix(earth_model_name):
    """Return the model name stripped of variant suffix and file extension."""
    prefix = earth_model_name.partition('-')[0]
    return prefix.partition('.')[0]
def get_fomosto_baseconfig(
        gfconfig, event, station, waveforms, crust_ind):
    """
    Initialise fomosto config.

    Parameters
    ----------
    gfconfig : :class:`config.NonlinearGFConfig`
    event : :class:`pyrocko.model.Event`
        The event is used as a reference point for all the calculations
        According to the its location the earth model is being built
    station : :class:`pyrocko.model.Station` or
        :class:`heart.ReferenceLocation`
    waveforms : List of str
        Waveforms to calculate GFs for, determines the length of traces
    crust_ind : int
        Index to set to the Greens Function store

    Returns
    -------
    :class:`pyrocko.gf.meta.ConfigTypeA`
    """
    sf = gfconfig
    # consistency: use the local alias "sf" throughout (was "gfconfig.code")
    if sf.code != 'psgrn' and len(waveforms) < 1:
        raise IOError('No waveforms specified! No GFs to be calculated!')
    # calculate event-station distance [m]
    distance = orthodrome.distance_accurate50m(event, station)
    distance_min = distance - (sf.source_distance_radius * km)
    if distance_min < 0.:
        # logger.warn is deprecated in favour of logger.warning
        logger.warning(
            'Minimum grid distance is below zero. Setting it to zero!')
        distance_min = 0.
    # define phases
    tabulated_phases = []
    if 'any_P' in waveforms:
        if sf.earth_model_name == 'local':
            # purely local model: no core-reflected phase available
            definition = 'p,P,p\\,P\\'
        else:
            definition = 'p,P,p\\,P\\,Pv_(cmb)p'
        tabulated_phases.append(gf.TPDef(
            id='any_P',
            definition=definition))
    if 'any_S' in waveforms:
        tabulated_phases.append(gf.TPDef(
            id='any_S',
            definition='s,S,s\\,S\\'))
    # surface waves
    if 'slowest' in waveforms:
        tabulated_phases.append(gf.TPDef(
            id='slowest',
            definition='0.8'))
    return gf.ConfigTypeA(
        id='%s_%s_%.3fHz_%s' % (
            station.station,
            get_earth_model_prefix(sf.earth_model_name),
            sf.sample_rate,
            crust_ind),
        ncomponents=10,
        sample_rate=sf.sample_rate,
        receiver_depth=0. * km,
        source_depth_min=sf.source_depth_min * km,
        source_depth_max=sf.source_depth_max * km,
        source_depth_delta=sf.source_depth_spacing * km,
        distance_min=distance_min,
        distance_max=distance + (sf.source_distance_radius * km),
        distance_delta=sf.source_distance_spacing * km,
        tabulated_phases=tabulated_phases)
# Maps the modelling code identifier to the fomosto backend build function
# used to compute the respective Greens Function store.
backend_builders = {
    'qseis': qseis.build,
    'qssp': qssp.build,
    'qseis2d': qseis2d.build}
def choose_backend(
        fomosto_config, code, source_model, receiver_model,
        gf_directory='qseis2d_green'):
    """
    Get backend related config.

    Builds the backend-specific configuration object for the requested
    modelling code ('qseis', 'qssp' or 'qseis2d') and fills the remaining
    earthmodel and timing fields of the given fomosto config in place.

    Parameters
    ----------
    fomosto_config : fomosto store config (modified in place)
    code : str
        modelling code identifier, one of 'qseis', 'qssp', 'qseis2d'
    source_model, receiver_model : :class:`pyrocko.cake.LayeredModel`
    gf_directory : str
        directory for intermediate GFs (qseis2d only)

    Raises
    ------
    TypeError
        if near-field phases are requested with a non-qseis backend
    NotImplementedError
        for unsupported backend codes
    """
    fc = fomosto_config
    # depth below which source and receiver side share a basement layer
    receiver_basement_depth = 150 * km
    distances = num.array([fc.distance_min, fc.distance_max]) * cake.m2d
    slowness_taper = get_slowness_taper(fc, source_model, distances)
    waveforms = [phase.id for phase in fc.tabulated_phases]
    if 'slowest' in waveforms and code != 'qseis':
        raise TypeError(
            'For near-field phases the "qseis" backend has to be used!')
    if code == 'qseis':
        version = '2006a'
        if 'slowest' in waveforms or distances.min() < 10:
            # near-field case: fall back to a single shared site structure
            logger.info(
                'Receiver and source'
                ' site structures have to be identical as distance'
                ' and ray depth not high enough for common receiver'
                ' depth!')
            receiver_model = None
            slowness_taper = (0., 0., 0., 0.)
            sw_algorithm = 0
            sw_flat_earth_transform = 0
        else:
            # find common basement layer
            common_basement = source_model.layer(receiver_basement_depth)
            receiver_model = receiver_model.extract(
                depth_max=common_basement.ztop)
            receiver_model.append(common_basement)
            sw_algorithm = 1
            sw_flat_earth_transform = 1
        conf = qseis.QSeisConfig(
            filter_shallow_paths=0,
            slowness_window=slowness_taper,
            wavelet_duration_samples=0.001,
            sw_flat_earth_transform=sw_flat_earth_transform,
            sw_algorithm=sw_algorithm,
            qseis_version=version)
    elif code == 'qssp':
        # qssp uses one model for both sides: the receiver model replaces
        # the source model and no separate receiver model is set
        source_model = copy.deepcopy(receiver_model)
        receiver_model = None
        version = '2010'
        conf = qssp.QSSPConfig(
            qssp_version=version,
            slowness_max=float(num.max(slowness_taper)),
            toroidal_modes=True,
            spheroidal_modes=True,
            source_patch_radius=(
                fc.distance_delta - fc.distance_delta * 0.05) / km)
    elif code == 'qseis2d':
        version = '2014'
        conf = qseis2d.QSeis2dConfig()
        conf.qseis_s_config.slowness_window = slowness_taper
        conf.qseis_s_config.calc_slowness_window = 0
        conf.qseis_s_config.receiver_max_distance = \
            distances[1] * cake.d2m / km
        conf.qseis_s_config.sw_flat_earth_transform = 1
        conf.gf_directory = gf_directory
        # find common basement layer
        layer = source_model.layer(receiver_basement_depth)
        conf.qseis_s_config.receiver_basement_depth = \
            round(layer.zbot / km, 1)
        receiver_model = receiver_model.extract(
            depth_max=layer.ztop)
        receiver_model.append(layer)
    else:
        raise NotImplementedError('Backend not supported: %s' % code)
    # fill remaining fomosto params
    fc.earthmodel_1d = source_model
    fc.earthmodel_receiver_1d = receiver_model
    fc.modelling_code_id = code + '.' + version
    window_extension = 60.  # [s]
    pids = ['stored:' + wave for wave in waveforms]
    # trace windows: computed region, cut interval and fade tapers relative
    # to the first/last stored phase arrivals
    conf.time_region = (
        gf.Timing(
            phase_defs=pids, offset=(-1.1 * window_extension), select='first'),
        gf.Timing(
            phase_defs=pids, offset=(1.6 * window_extension), select='last'))
    conf.cut = (
        gf.Timing(
            phase_defs=pids, offset=-window_extension, select='first'),
        gf.Timing(
            phase_defs=pids, offset=(1.5 * window_extension), select='last'))
    conf.relevel_with_fade_in = True
    conf.fade = (
        gf.Timing(
            phase_defs=pids, offset=-window_extension, select='first'),
        gf.Timing(
            phase_defs=pids, offset=(-0.1) * window_extension, select='first'),
        gf.Timing(
            phase_defs=pids, offset=(window_extension), select='last'),
        gf.Timing(
            phase_defs=pids, offset=(1.6 * window_extension), select='last'))
    return conf
def seis_construct_gf(
        stations, event, seismic_config, crust_ind=0, execute=False,
        force=False):
    """
    Calculate seismic Greens Functions (GFs) and create a repository 'store'
    that is used later on repeatedly to calculate synthetic waveforms.

    Parameters
    ----------
    stations : list
        of :class:`pyrocko.model.Station`
        define the distances from the event for which GFs are calculated
    event : :class:`pyrocko.model.Event`
        reference point for all calculations; the earth model is built
        according to its location
    seismic_config : :class:`config.SeismicConfig`
    crust_ind : int
        Index assigned to the Greens Function store; 0 is the reference
        store, indexes > 0 use the reference model with Gaussian variations
    execute : boolean
        Flag to execute the calculation, if False just setup tested
    force : boolean
        Flag to overwrite existing GF stores
    """
    gfc = seismic_config.gf_config
    waveforms = seismic_config.get_waveform_names()

    source_model = get_velocity_model(
        event, earth_model_name=gfc.earth_model_name, crust_ind=crust_ind,
        gf_config=gfc, custom_velocity_model=gfc.custom_velocity_model)

    for station in stations:
        logger.info('Station %s' % station.station)
        logger.info('---------------------')

        base_config = get_fomosto_baseconfig(
            gfc, event, station, waveforms, crust_ind)

        store_dir = os.path.join(gfc.store_superdir, base_config.id)

        if os.path.exists(store_dir) and not force:
            logger.info(
                'Store %s exists! Use force=True to overwrite!' % store_dir)
        else:
            logger.info('Creating Store at %s' % store_dir)

            # a custom receiver-side velocity model only makes sense for a
            # single-station setup
            custom_velocity_model = (
                gfc.custom_velocity_model if len(stations) == 1 else None)

            receiver_model = get_velocity_model(
                station, earth_model_name=gfc.earth_model_name,
                crust_ind=crust_ind, gf_config=gfc,
                custom_velocity_model=custom_velocity_model)

            gf_directory = os.path.join(
                gfc.store_superdir, 'base_gfs_%i' % crust_ind)

            conf = choose_backend(
                base_config, gfc.code, source_model, receiver_model,
                gf_directory)

            base_config.validate()
            conf.validate()

            gf.Store.create_editables(
                store_dir,
                config=base_config,
                extra={gfc.code: conf},
                force=force)

        if not execute:
            continue

        traces_path = os.path.join(store_dir, 'traces')
        if os.path.exists(traces_path) and not force:
            logger.info('Traces exist use force=True to overwrite!')
            continue

        logger.info('Filling store ...')
        store = gf.Store(store_dir, 'r')
        store.make_ttt(force=force)
        store.close()

        backend_builders[gfc.code](
            store_dir, nworkers=gfc.nworkers, force=force)

        if gfc.rm_gfs and gfc.code == 'qssp':
            # raw QSSP GFs are only needed for store building
            gf_dir = os.path.join(store_dir, 'qssp_green')
            logger.info('Removing QSSP Greens Functions!')
            shutil.rmtree(gf_dir)
def geo_construct_gf(
        event, geodetic_config, crust_ind=0, execute=True, force=False):
    """
    Calculate geodetic Greens Functions (GFs) and create a fomosto 'GF store'
    that is used repeatedly later on to calculate synthetic displacements.
    Enables various different source geometries.

    Parameters
    ----------
    event : :class:`pyrocko.model.Event`
        reference point for all calculations; the earth model is built
        according to its location
    geodetic_config : :class:`config.GeodeticConfig`
    crust_ind : int
        Index to set to the Greens Function store
    execute : boolean
        Flag to execute the calculation, if False just setup tested
    force : boolean
        Flag to overwrite existing GF stores
    """
    from pyrocko.fomosto import psgrn_pscmp as ppp

    version = '2008a'
    gfc = geodetic_config.gf_config

    # source-side crustal profile, cut below the maximum source depth
    source_model = get_velocity_model(
        event, earth_model_name=gfc.earth_model_name,
        crust_ind=crust_ind, gf_config=gfc,
        custom_velocity_model=gfc.custom_velocity_model).extract(
        depth_max=gfc.source_depth_max * km)

    ppcfg = ppp.PsGrnPsCmpConfig()
    ppcfg.pscmp_config.version = version
    ppcfg.psgrn_config.version = version
    ppcfg.psgrn_config.sampling_interval = gfc.sampling_interval
    ppcfg.psgrn_config.gf_depth_spacing = gfc.medium_depth_spacing
    ppcfg.psgrn_config.gf_distance_spacing = gfc.medium_distance_spacing

    reference_station = ReferenceLocation(
        station='statics',
        lat=event.lat,
        lon=event.lon)

    base_config = get_fomosto_baseconfig(
        gfconfig=gfc, event=event, station=reference_station,
        waveforms=[], crust_ind=crust_ind)

    store_dir = os.path.join(gfc.store_superdir, base_config.id)

    if os.path.exists(store_dir) and not force:
        logger.info(
            'Store %s exists! Use force=True to overwrite!' % store_dir)
    else:
        logger.info('Create Store at: %s' % store_dir)
        logger.info('---------------------------')

        if crust_ind > 0:
            # perturbed source model for uncertainty stores
            source_model = ensemble_earthmodel(
                source_model,
                num_vary=1,
                error_depth=gfc.error_depth,
                error_velocities=gfc.error_velocities)[0]

        base_config.earthmodel_1d = source_model
        base_config.modelling_code_id = 'psgrn_pscmp.%s' % version

        ppcfg.validate()
        base_config.validate()

        gf.store.Store.create_editables(
            store_dir, config=base_config,
            extra={'psgrn_pscmp': ppcfg}, force=force)

    if execute:
        traces_path = os.path.join(store_dir, 'traces')
        if os.path.exists(traces_path) and not force:
            logger.info('Traces exist use force=True to overwrite!')
            return

        logger.info('Filling store ...')
        store = gf.store.Store(store_dir, 'r')
        store.close()

        # build store
        try:
            ppp.build(store_dir, nworkers=gfc.nworkers, force=force)
        except ppp.PsCmpError as e:
            if 'could not start psgrn/pscmp' in str(e):
                logger.warn('psgrn/pscmp not installed')
                return
            else:
                raise
def geo_construct_gf_psgrn(
        event, geodetic_config, crust_ind=0, execute=True, force=False):
    """
    Calculate geodetic Greens Functions (GFs) and create a repository 'store'
    that is used later on repeatedly to calculate synthetic displacements.

    Deprecated in favour of the fomosto-based :func:`geo_construct_gf`.

    Parameters
    ----------
    event : :class:`pyrocko.model.Event`
        reference point for all calculations; the earth model is built
        according to its location
    geodetic_config : :class:`config.GeodeticConfig`
    crust_ind : int
        Index to set to the Greens Function store
    execute : boolean
        Flag to execute the calculation, if False just setup tested
    force : boolean
        Flag to overwrite existing GF stores
    """
    logger.warn(
        'This function is deprecated and might be removed in later versions!')
    gfc = geodetic_config.gf_config

    cfg = psgrn.PsGrnConfigFull()

    # number of grid nodes along the depth and distance axes (inclusive ends)
    n_steps_depth = int(
        (gfc.source_depth_max - gfc.source_depth_min) /
        gfc.source_depth_spacing) + 1
    n_steps_distance = int(
        (gfc.source_distance_max - gfc.source_distance_min) /
        gfc.source_distance_spacing) + 1

    cfg.distance_grid = psgrn.PsGrnSpatialSampling(
        n_steps=n_steps_distance,
        start_distance=gfc.source_distance_min,
        end_distance=gfc.source_distance_max)
    cfg.depth_grid = psgrn.PsGrnSpatialSampling(
        n_steps=n_steps_depth,
        start_distance=gfc.source_depth_min,
        end_distance=gfc.source_depth_max)
    cfg.sampling_interval = gfc.sampling_interval

    # extract source crustal profile and check for water layer
    source_model = get_velocity_model(
        event, earth_model_name=gfc.earth_model_name,
        crust_ind=crust_ind, gf_config=gfc,
        custom_velocity_model=gfc.custom_velocity_model).extract(
        depth_max=gfc.source_depth_max * km)

    if crust_ind > 0:
        # potentially vary source model
        source_model = ensemble_earthmodel(
            source_model,
            num_vary=1,
            error_depth=gfc.error_depth,
            error_velocities=gfc.error_velocities)[0]

    cfg.earthmodel_1d = source_model
    cfg.psgrn_outdir = os.path.join(
        gfc.store_superdir, 'psgrn_green_%i' % (crust_ind))
    cfg.validate()

    util.ensuredir(cfg.psgrn_outdir)

    runner = psgrn.PsGrnRunner(outdir=cfg.psgrn_outdir)

    if execute:
        logger.info('Creating Geo GFs in directory: %s' % cfg.psgrn_outdir)
        runner.run(cfg, force)
    else:
        logger.info('Geo GFs can be created in directory: %s ! '
                    '(execute=True necessary)! GF params: \n' %
                    cfg.psgrn_outdir)
        print(cfg)
def geo_layer_synthetics_pscmp(
        store_superdir, crust_ind, lons, lats, sources,
        keep_tmp=False, outmode='data'):
    """
    Calculate synthetic surface displacements from a Greens Function database
    for the given sources and observation points.

    Parameters
    ----------
    store_superdir : str
        main path to directory containing the different Greensfunction stores
    crust_ind : int
        index of Greens Function store to use
    lons : List of floats
        Longitudes [decimal deg] of observation points
    lats : List of floats
        Latitudes [decimal deg] of observation points
    sources : List of :class:`pscmp.PsCmpRectangularSource`
        Sources to calculate synthetics for
    keep_tmp : boolean
        Flag to keep directories (in '/tmp') where calculated synthetics are
        stored.
    outmode : str
        determines type of output

    Returns
    -------
    :class:`numpy.ndarray` (n_observations; ux-North, uy-East, uz-Down)
    """
    config = pscmp.PsCmpConfigFull()
    config.observation = pscmp.PsCmpScatter(lats=lats, lons=lons)
    config.psgrn_outdir = os.path.join(
        store_superdir, 'psgrn_green_%i/' % (crust_ind))

    # only coseismic displacement
    config.times_snapshots = [0]
    config.rectangular_source_patches = sources

    runner = pscmp.PsCmpRunner(keep_tmp=keep_tmp)
    runner.run(config)
    # runner returns one displacement set per snapshot; take the single
    # coseismic one
    return runner.get_results(component='displ', flip_z=True)[0]
class RayPathError(Exception):
    """Raised when no ray path (phase arrival) exists for a
    source-target configuration."""
def get_phase_arrival_time(engine, source, target, wavename=None, snap=True):
    """
    Get the arrival time of a tabulated phase from the Greens Function store
    for a :class:`pyrocko.gf.seismosizer.Target`,
    :class:`pyrocko.gf.meta.Location` pair.

    Parameters
    ----------
    engine : :class:`pyrocko.gf.seismosizer.LocalEngine`
    source : :class:`pyrocko.gf.meta.Location`
        can be therefore :class:`pyrocko.gf.seismosizer.Source` or
        :class:`pyrocko.model.Event`
    target : :class:`pyrocko.gf.seismosizer.Target`
    wavename : string
        Id of a tabulated phase in the respective target.store;
        if None, the first tabulated phase is used
    snap : if True
        force arrival time on discrete samples of the store

    Returns
    -------
    scalar, float of the arrival time of the wave
    """
    distance = target.distance_to(source)
    try:
        store = engine.get_store(target.store_id)
    except gf.seismosizer.NoSuchStore:
        # re-raise with the source-receiver distance attached for debugging
        raise gf.seismosizer.NoSuchStore(
            'No such store with ID %s found, distance [deg] to event: %f ' % (
                target.store_id, cake.m2d * distance))

    if wavename is None:
        wavename = store.config.tabulated_phases[0].id
        logger.debug(
            'Wavename not specified using '
            'first tabulated phase! %s' % wavename)

    logger.debug('Arrival time for wavename "%s" distance %f [deg]' % (
        wavename, cake.m2d * distance))

    try:
        atime = store.t(wavename, (source.depth, distance)) + source.time
    except TypeError:
        # store.t returns None outside the tabulated distance range
        raise RayPathError(
            'No wave-arrival for wavename "%s" distance %f [deg]! '
            'Please adjust the distance range in the wavemap config!' % (
                wavename, cake.m2d * distance))

    if snap:
        deltat = 1. / store.config.sample_rate
        atime = trace.t2ind(atime, deltat, snap=round) * deltat

    return atime
def get_phase_taperer(
        engine, source, wavename, target, arrival_taper, arrival_time=num.nan):
    """
    Create a phase taperer and return :class:`pyrocko.trace.CosTaper`
    according to the defined arrival_taper times. If no valid arrival_time
    is given, the synthetic travel time of the source-target pair is used.

    Parameters
    ----------
    engine : :class:`pyrocko.gf.seismosizer.LocalEngine`
    source : :class:`pyrocko.gf.meta.Location`
        can be therefore :class:`pyrocko.gf.seismosizer.Source` or
        :class:`pyrocko.model.Event`
    wavename : string
        of the tabulated phase that determines the phase arrival
    target : :class:`pyrocko.gf.seismosizer.Target`
    arrival_taper : :class:`ArrivalTaper`
    arrival_time : float, optional
        shift on arrival time; NaN triggers the ray-tracing fallback

    Returns
    -------
    :class:`pyrocko.trace.CosTaper`
    """
    reference_time = arrival_time
    if num.isnan(reference_time):
        logger.warning('Using source reference for tapering!')
        reference_time = get_phase_arrival_time(
            engine=engine, source=source, target=target, wavename=wavename)

    return arrival_taper.get_pyrocko_taper(float(reference_time))
class WaveformMapping(object):
    """
    Maps synthetic waveform parameters to targets, stations and data

    Parameters
    ----------
    name : str
        name of the waveform according to travel time tables
    stations : list
        of :class:`pyrocko.model.Station`
    weights : list
        of theano.shared variables
    channels : list
        of channel names valid for all the stations of this wavemap
    datasets : list
        of :class:`heart.Dataset` inherited from :class:`pyrocko.trace.Trace`
    targets : list
        of :class:`pyrocko.gf.target.Target`
    deltat : float
        sampling interval [s] of the datasets
    """

    def __init__(self, name, stations, weights=None, channels=None,
                 datasets=None, targets=None, deltat=None):
        # None defaults replace the former mutable default arguments
        # (['Z'], [], []), which were shared between all instances
        self.name = name
        self.stations = stations
        self.weights = weights
        self.datasets = [] if datasets is None else datasets
        self.targets = [] if targets is None else targets
        self.channels = ['Z'] if channels is None else channels
        self.station_correction_idxs = None
        self._prepared_data = None
        self._arrival_times = None
        self._target2index = None
        self._station2index = None
        self.deltat = deltat
        self.config = None

        self._update_trace_wavenames()
        self._update_station_corrections()

    def target_index_mapping(self):
        """Return (lazily built) dict mapping each target to its index."""
        if self._target2index is None:
            self._target2index = dict(
                (target, i) for (i, target) in enumerate(
                    self.targets))
        return self._target2index

    def station_index_mapping(self):
        """Return (lazily built) dict mapping each station to its index."""
        if self._station2index is None:
            self._station2index = dict(
                (station, i) for (i, station) in enumerate(
                    self.stations))
        return self._station2index

    def add_weights(self, weights, force=False):
        """Attach data weights; their number has to match the targets."""
        n_w = len(weights)
        if n_w != self.n_t:
            raise CollectionError(
                'Number of Weights %i inconsistent with targets %i!' % (
                    n_w, self.n_t))

        self.weights = weights

    def _update_station_corrections(self):
        """
        Update station_correction_idx
        """
        s2i = self.station_index_mapping()
        station_idxs = []
        # one station-correction index per (channel, station) pair,
        # channel-major order
        for channel in self.channels:
            station_idxs.extend(
                [s2i[station] for station in self.stations])

        self.station_correction_idxs = num.array(
            station_idxs, dtype='int16')

    def station_weeding(self, event, distances, blacklist=None):
        """
        Weed stations and related objects based on distances and blacklist.
        Works only a single time after init!
        """
        # operate on a copy; the former mutable default argument ([])
        # was mutated via extend and accumulated blacklisted stations
        # across successive calls
        blacklist = [] if blacklist is None else list(blacklist)
        blacklist.extend(self.get_station_names_without_data())

        self.stations = utility.apply_station_blacklist(
            self.stations, blacklist)

        self.stations = utility.weed_stations(
            self.stations, event, distances=distances, remove_duplicate=True)

        if self.n_data > 0:
            self.datasets = utility.weed_data_traces(
                self.datasets, self.stations)

        self.targets = utility.weed_targets(self.targets, self.stations)

        # reset mappings
        self._target2index = None
        self._station2index = None

        if self.n_t > 0:
            self._update_station_corrections()

        self.check_consistency()

    def get_station_names(self):
        """
        Returns list of strings of station names
        """
        return [station.station for station in self.stations]

    def get_station_names_without_data(self):
        """Return names of stations that have no matching data trace."""
        blacklist = []
        station_names = self.get_station_names()
        dataset_station_names = [tr.station for tr in self.datasets]

        for station_name in station_names:
            if station_name not in dataset_station_names:
                logger.warning(
                    'Found no data for station "%s" '
                    '- removing it from setup.' % station_name)
                blacklist.append(station_name)

        return blacklist

    def check_consistency(self):
        """Raise CollectionError unless datasets and targets match 1:1."""
        if self.n_t != self.n_data:
            raise CollectionError(
                'Inconsistent number of datasets and targets!')
        elif self.n_t == 0:
            raise CollectionError(
                'No data left in wavemap "%s" after applying the distance '
                'filter! Either (1) Adjust distance range (set "distances" '
                ' parameter in beat.WaveformFitConfig, given in degrees '
                ' epicentral distance) or (2) deactivate the wavemap '
                'completely by setting include=False!' % self._mapid)
        else:
            logger.info('Consistent number of '
                        'datasets and targets in %s wavemap!' % self._mapid)

    def update_interpolation(self, method):
        """Set the GF interpolation method on all targets."""
        for target in self.targets:
            target.interpolation = method

    def _update_trace_wavenames(self, wavename=None):
        # tag all data traces with the wavename of this map
        if wavename is None:
            wavename = self.name

        for dtrace in self.datasets:
            dtrace.set_wavename(wavename)

    @property
    def _mapid(self):
        # mapnumber is attached externally (see init_wavemap); fall back
        # to the plain name if it has not been set
        if hasattr(self, 'mapnumber'):
            return '_'.join((self.name, str(self.mapnumber)))
        else:
            return self.name

    @property
    def time_shifts_id(self):
        """Name of the station-corrections random variable of this map."""
        return 'time_shifts_' + self._mapid

    @property
    def n_t(self):
        """Number of targets."""
        return len(self.targets)

    @property
    def hypersize(self):
        """
        Return the size of the related hyperparameters as an integer.
        """
        nhyp = self.n_t / len(self.channels)
        if nhyp.is_integer():
            return int(nhyp)
        else:
            raise ValueError(
                'hyperparameter size is not integer '
                'for wavemap %s' % self._mapid)

    @property
    def n_data(self):
        """Number of datasets."""
        return len(self.datasets)

    def get_target_idxs(self, channels=None):
        """Return target indexes for the given channels, channel-major."""
        channels = ['Z'] if channels is None else channels
        t2i = self.target_index_mapping()
        dtargets = utility.gather(self.targets, lambda t: t.codes[3])

        tidxs = []
        for cha in channels:
            tidxs.extend([t2i[target] for target in dtargets[cha]])

        return tidxs

    def prepare_data(
            self, source, engine, outmode='array', chop_bounds=('b', 'c')):
        """
        Taper, filter data traces according to given reference event.
        Traces are concatenated to one single array.
        """
        if self.config is None:
            # was guarded with hasattr(self, 'config') before, which is
            # always True since __init__ sets the attribute to None, so
            # this error could never be raised
            raise ValueError('Wavemap needs configuration!')

        if self._prepared_data is not None:
            logger.debug(
                'Overwriting observed data windows in "%s"!' %
                (self.name + '_' + str(self.mapnumber)))

        arrival_times = num.zeros((self.n_t), dtype=tconfig.floatX)
        for i, target in enumerate(self.targets):
            arrival_times[i] = get_phase_arrival_time(
                engine=engine, source=source,
                target=target, wavename=self.name)

        if self.config.preprocess_data:
            logger.debug('Pre-processing data ...')
            filterer = self.config.filterer
        else:
            logger.debug('Not pre-processing data ...')
            filterer = None

        self._prepared_data = taper_filter_traces(
            self.datasets,
            arrival_taper=self.config.arrival_taper,
            filterer=filterer,
            arrival_times=arrival_times,
            outmode=outmode,
            chop_bounds=chop_bounds,
            deltat=self.deltat,
            plot=False)

        self._arrival_times = arrival_times

    def get_highest_frequency(self):
        """
        Loop over filterers and return highest frequency.
        """
        highest_fs = []
        for filt in self.config.filterer:
            if isinstance(filt, Filter):
                highest_fs.append(filt.upper_corner)

        if len(highest_fs) > 0:
            return num.max(highest_fs)
        else:
            raise ValueError(
                'No Filterer objects configured for wavemap %s '
                'Cannot return upper corner frequency!' % self._mapid)

    @property
    def shared_data_array(self):
        """Return the prepared data as a theano shared variable."""
        if self._prepared_data is None:
            raise ValueError('Data array is not initialized')
        elif isinstance(self._prepared_data, list):
            raise ValueError(
                'Data got initialized as pyrocko traces, need array!')
        else:
            return shared(
                self._prepared_data,
                name='%s_data' % self.name, borrow=True)
class CollectionError(Exception):
    """Raised on inconsistencies within waveform/data collections."""
class DataWaveformCollection(object):
    """
    Collection of available datasets, data-weights, waveforms and
    DynamicTargets used to create synthetics.

    Is used to return Mappings of the waveforms of interest to fit to the
    involved data, weights and synthetics generating objects.

    Parameters
    ----------
    stations : List of :class:`pyrocko.model.Station`
        List of station objects that are contained in the dataset
    waveforms : list
        of strings of tabulated phases that are to be used for misfit
        calculation
    target_deltat : float
        sampling interval the data is going to be downsampled to
    """

    def __init__(self, stations, waveforms=None, target_deltat=None):
        self.stations = stations
        self.waveforms = waveforms
        self._deltat = target_deltat
        self._targets = OrderedDict()
        # raw traces as loaded; down-sampled copies live in _datasets
        self._datasets = OrderedDict()
        self._raw_datasets = OrderedDict()
        self._responses = None
        self._target2index = None
        self._station2index = None

    def adjust_sampling_datasets(self, deltat, snap=False, force=False):
        """Down-sample all raw traces to ``deltat`` and store the results."""
        for tr in self._raw_datasets.values():
            if tr.nslc_id not in self._datasets or force:
                self._datasets[tr.nslc_id] = \
                    utility.downsample_trace(tr, deltat, snap=snap)
            else:
                raise CollectionError(
                    'Downsampled trace %s already in'
                    ' collection!' % utility.list2string(tr.nslc_id))

        self._deltat = deltat

    def _check_collection(self, waveform, errormode='not_in', force=False):
        """Raise CollectionError if waveform membership violates errormode."""
        if errormode == 'not_in':
            if waveform not in self.waveforms:
                raise CollectionError(
                    'Waveform is not contained in collection!')
            else:
                pass

        elif errormode == 'in':
            if waveform in self.waveforms and not force:
                # message typo fixed ('Wavefom' -> 'Waveform')
                raise CollectionError('Waveform already in collection!')
            else:
                pass

    @property
    def n_t(self):
        """Number of targets in the collection."""
        return len(self._targets.keys())

    def add_collection(self, waveform=None, datasets=None, targets=None,
                       weights=None, force=False):
        """
        Add a waveform together with its targets and datasets in one call.

        Note: ``weights`` is accepted for interface compatibility but is
        currently unused.
        """
        # fixed: previously called the non-existent method ``add_waveform``
        # and passed ``waveform`` in place of the targets/datasets arguments
        self.add_waveforms([waveform], force=force)
        self.add_targets(targets, force=force)
        self.add_datasets(datasets, force=force)

    @property
    def n_waveforms(self):
        """Number of waveform names in the collection."""
        return len(self.waveforms)

    def target_index_mapping(self):
        """Return (lazily built) dict mapping each target to its index."""
        if self._target2index is None:
            self._target2index = dict(
                (target, i) for (i, target) in enumerate(
                    self._targets.values()))
        return self._target2index

    def get_waveform_names(self):
        """Return the list of waveform (phase) names."""
        return self.waveforms

    def get_dataset(self, nslc, raw=False):
        """Return the (down-sampled or raw) data trace for an nslc tuple."""
        if not raw:
            return self._datasets[nslc]
        else:
            return self._raw_datasets[nslc]

    def add_waveforms(self, waveforms=None, force=False):
        """Append waveform names after checking for duplicates."""
        # None default instead of a mutable default argument
        waveforms = [] if waveforms is None else waveforms
        for waveform in waveforms:
            self._check_collection(waveform, errormode='in', force=force)
            self.waveforms.append(waveform)

    def add_responses(self, responses, location=None):
        """Store instrument responses, optionally overriding location code."""
        self._responses = OrderedDict()

        for k, v in responses.items():
            if location is not None:
                k = list(k)
                k[2] = str(location)
                k = tuple(k)

            self._responses[k] = v

    def add_targets(self, targets, replace=False, force=False):
        """Add targets, keyed by their nslc codes."""
        if replace:
            self._targets = OrderedDict()

        current_targets = self._targets.values()
        for target in targets:
            if target not in current_targets or force:
                self._targets[target.codes] = target
            else:
                logger.warn(
                    'Target %s already in collection!' % str(target.codes))

    def add_datasets(self, datasets, location=None, replace=False,
                     force=False):
        """Add raw data traces, optionally overriding their location code."""
        if replace:
            self._datasets = OrderedDict()
            self._raw_datasets = OrderedDict()

        entries = self._raw_datasets.keys()
        for d in datasets:
            if location is not None:
                d.set_location(str(location))

            nslc_id = d.nslc_id
            if nslc_id not in entries or force:
                self._raw_datasets[nslc_id] = d
            else:
                logger.warn(
                    'Dataset %s already in collection!' % str(nslc_id))

    @property
    def n_data(self):
        """Number of (down-sampled) datasets."""
        return len(self._datasets.keys())

    def get_waveform_mapping(
            self, waveform, channels=None, quantity='displacement'):
        """
        Return a :class:`WaveformMapping` for the given waveform name,
        gathering the matching targets and raw data traces per channel.
        """
        # None default instead of a mutable default argument
        channels = ['Z', 'T', 'R'] if channels is None else channels
        self._check_collection(waveform, errormode='not_in')

        dtargets = utility.gather(
            self._targets.values(), lambda t: t.codes[3])

        targets = []
        for cha in channels:
            targets.extend(dtargets[cha])

        datasets = []
        discard_targets = []
        for target in targets:
            target.quantity = quantity
            nslc_id = target.codes
            try:
                dtrace = self._raw_datasets[nslc_id]
                datasets.append(dtrace)
            except KeyError:
                logger.warn(
                    'No data trace for target %s in '
                    'the collection! Removing target!' % str(nslc_id))
                discard_targets.append(target)

            if self._responses:
                try:
                    target.update_response(*self._responses[nslc_id])
                except KeyError:
                    logger.warn(
                        'No response for target %s in '
                        'the collection!' % str(nslc_id))

        targets = utility.weed_targets(
            targets, self.stations, discard_targets=discard_targets)

        ndata = len(datasets)
        n_t = len(targets)

        if ndata != n_t:
            logger.warn(
                'Inconsistent number of targets %i '
                'and datasets %i! in wavemap %s init' % (n_t, ndata, waveform))

        return WaveformMapping(
            name=waveform,
            stations=copy.deepcopy(self.stations),
            datasets=copy.deepcopy(datasets),
            targets=copy.deepcopy(targets),
            channels=channels,
            deltat=self._deltat)
def concatenate_datasets(datasets):
    """
    Concatenate datasets to single arrays

    Parameters
    ----------
    datasets : list
        of :class:`GeodeticDataset`

    Returns
    -------
    datasets : 1d :class:numpy.NdArray` n x 1
    los_vectors : 2d :class:numpy.NdArray` n x 3
    odws : 1d :class:numpy.NdArray` n x 1
    Bij : :class:`utility.ListToArrayBijection`
    """
    displacements = [
        data.displacement.astype(tconfig.floatX) for data in datasets]
    overlap_weights = [
        data.odw.astype(tconfig.floatX) for data in datasets]
    los_list = [
        data.update_los_vector().astype(tconfig.floatX) for data in datasets]

    # residuals are computed on one concatenated array; keep the
    # list <-> array mapping around to split results up again later
    ordering = utility.ListArrayOrdering(displacements, intype='numpy')
    Bij = utility.ListToArrayBijection(ordering, displacements)

    odws = Bij.l2a(overlap_weights).astype(tconfig.floatX)
    datasets = Bij.l2a(displacements).astype(tconfig.floatX)
    los_vectors = Bij.f3map(los_list).astype(tconfig.floatX)
    return datasets, los_vectors, odws, Bij
def init_datahandler(
        seismic_config, seismic_data_path='./', responses_path=None):
    """
    Initialise datahandler.

    Parameters
    ----------
    seismic_config : :class:`config.SeismicConfig`
    seismic_data_path : str
        absolute path to the directory of the seismic data
    responses_path : str, optional
        path to pickled instrument responses

    Returns
    -------
    datahandler : :class:`DataWaveformCollection`
    """
    sc = seismic_config
    stations, data_traces = utility.load_objects(seismic_data_path)

    targets = init_seismic_targets(
        stations,
        earth_model_name=sc.gf_config.earth_model_name,
        channels=sc.get_unique_channels(),
        sample_rate=sc.gf_config.sample_rate,
        crust_inds=[sc.gf_config.reference_model_idx],
        reference_location=sc.gf_config.reference_location)

    datahandler = DataWaveformCollection(
        stations, sc.get_waveform_names(), 1. / sc.gf_config.sample_rate)
    datahandler.add_datasets(
        data_traces, location=sc.gf_config.reference_model_idx)
    # decimation needs to come after filtering, so down-sampling is not
    # done here
    datahandler.add_targets(targets)

    if responses_path:
        datahandler.add_responses(
            utility.load_objects(responses_path),
            location=sc.gf_config.reference_model_idx)

    return datahandler
def init_wavemap(
        waveformfit_config, datahandler=None, event=None, mapnumber=0):
    """
    Initialise wavemap, which sets targets, datasets and stations into
    relation to the seismic Phase of interest and allows individual
    specifications.

    Parameters
    ----------
    waveformfit_config : :class:`config.WaveformFitConfig`
    datahandler : :class:`DataWaveformCollection`
    event : :class:`pyrocko.model.Event`
    mapnumber : int
        number of wavemap in list of wavemaps

    Returns
    -------
    wmap : :class:`WaveformMapping`
    """
    wc = waveformfit_config

    wmap = datahandler.get_waveform_mapping(
        wc.name, channels=wc.channels, quantity=wc.quantity)
    wmap.config = wc
    wmap.mapnumber = mapnumber

    # the sampling rates of taper windows and data have to agree
    wmap.config.arrival_taper.check_sample_rate_consistency(
        datahandler._deltat)

    wmap.station_weeding(event, wc.distances, blacklist=wc.blacklist)

    wmap.update_interpolation(wc.interpolation)
    wmap._update_trace_wavenames('%s_%s' % (wc.name, wmap.mapnumber))

    logger.info('Number of seismic datasets for wavemap: %s: %i ' % (
        wmap._mapid, wmap.n_data))
    return wmap
def post_process_trace(
        trace, taper, filterer, taper_tolerance_factor=0., deltat=None,
        outmode=None, chop_bounds=('b', 'c'), transfer_function=None):
    """
    Taper, filter and then chop one trace in place.

    Parameters
    ----------
    trace : :class:`SeismicDataset`
    taper : :class:`pyrocko.trace.Taper`
        taper to apply; skipped if falsy or if outmode == 'data'
        (docstring previously named this parameter ``arrival_taper``)
    filterer : list
        of :class:`Filterer`
    taper_tolerance_factor : float
        default: 0 , cut exactly at the taper edges
        taper.fadein times this factor determines added tolerance
    deltat : float, optional
        if given, the trace is downsampled to this sampling interval
    outmode : str, optional
        'data' skips tapering and chopping entirely
    chop_bounds : sequence of str
        determines where to chop the trace on the taper attributes
        may be combination of [a, b, c, d]
        (tuple default replaces the former mutable default argument)
    transfer_function : optional
        if given, the trace is convolved with this transfer function

    Returns
    -------
    trace : the processed trace; may be a new instance if a transfer
        function or downsampling was applied
    """
    if transfer_function:
        # convolve invert False deconvolve invert True
        dummy_filterer = FrequencyFilter()
        trace = trace.transfer(
            dummy_filterer.tfade, dummy_filterer.freqlimits,
            transfer_function=transfer_function,
            invert=False, cut_off_fading=False)
        logger.debug('transfer trace: %s' % trace.__str__())

    if filterer:
        # apply all the filters
        for filt in filterer:
            filt.apply(trace)

    if deltat is not None:
        trace = utility.downsample_trace(trace, deltat, snap=True)

    if taper and outmode != 'data':
        tolerance = (taper.b - taper.a) * taper_tolerance_factor
        lower_cut = getattr(taper, chop_bounds[0]) - tolerance
        upper_cut = getattr(taper, chop_bounds[1]) + tolerance

        logger.debug('taper times: %s' % taper.__str__())
        logger.debug('trace: %s' % trace.__str__())

        # zero-pad so the taper window is fully covered before tapering
        trace.extend(lower_cut, upper_cut, fillmethod='zeros')
        trace.taper(taper, inplace=True)
        trace.chop(tmin=lower_cut, tmax=upper_cut, snap=(num.floor, num.floor))
        logger.debug('chopped trace: %s' % trace.__str__())

    return trace
class StackingError(Exception):
    """Raised when synthetic traces cannot be stacked into one array."""
# number of zeros in the instrument-response prototype, depending on the
# quantity the related data are recorded in (used by proto2zpk)
nzeros = {
    'displacement': 2,
    'velocity': 3,
}


def proto2zpk(magnification, damping, period, quantity='displacement'):
    """
    Convert magnification, damping and period of a station to poles and zeros.

    Parameters
    ----------
    magnification : float
        gain of station
    damping : float
        in []
    period : float
        in [s]
    quantity : string
        in which related data are recorded

    Returns
    -------
    lists of zeros, poles and gain
    """
    import cmath

    omega0 = 2.0 * num.pi / period
    # complex-conjugate pole pair of a damped harmonic oscillator
    real_part = -damping * omega0
    imag_part = 1.0J * omega0 * cmath.sqrt(1.0 - damping ** 2)

    zeros = [0.0] * nzeros[quantity]
    poles = [real_part + imag_part, real_part - imag_part]
    return zeros, poles, magnification
def seis_synthetics(
        engine, sources, targets, arrival_taper=None,
        wavename='any_P', filterer=None, reference_taperer=None,
        plot=False, nprocs=1, outmode='array',
        pre_stack_cut=False, taper_tolerance_factor=0.,
        arrival_times=None, chop_bounds=('b', 'c')):
    """
    Calculate synthetic seismograms of combination of targets and sources,
    filtering and tapering afterwards (filterer)
    tapering according to arrival_taper around P -or S wave.
    If reference_taper the given taper is always used.

    Parameters
    ----------
    engine : :class:`pyrocko.gf.seismosizer.LocalEngine`
    sources : list
        containing :class:`pyrocko.gf.seismosizer.Source` Objects
        reference source is the first in the list!!!
    targets : list
        containing :class:`pyrocko.gf.seismosizer.Target` Objects
    arrival_taper : :class:`ArrivalTaper`
    wavename : string
        of the tabulated phase that determines the phase arrival
    filterer : :class:`Filterer`
    reference_taperer : unused, kept for interface compatibility
    plot : boolean
        flag for looking at traces
    nprocs : int
        number of processors to use for synthetics calculation
        --> currently no effect !!!
    outmode : string
        output format of synthetics can be 'array', 'stacked_traces',
        'data' returns traces unstacked including post-processing,
        'tapered_data' returns unstacked but tapered traces
    pre_stack_cut : boolean
        flag to decide whether prior to stacking the GreensFunction traces
        should be cut according to the phase arrival time and the defined
        taper
    taper_tolerance_factor : float
        tolerance to chop traces around taper.a and taper.d
    arrival_times : None or :class:`numpy.NdArray`
        of phase to apply taper, if None theoretic arrival of ray tracing used
    chop_bounds : sequence of str
        determines where to chop the trace on the taper attributes
        may be combination of [a, b, c, d]

    Returns
    -------
    :class:`numpy.ndarray` or List of :class:`pyrocko.trace.Trace`
         with data each row-one target
    :class:`numpy.ndarray` of tmins for traces
    """
    stackmodes = ['array', 'data', 'stacked_traces', 'tapered_data']

    if outmode not in stackmodes:
        raise StackingError(
            'Outmode "%s" not available! Available: %s' % (
                outmode, utility.list2string(stackmodes)))

    # fixed: the default arrival_times=None previously crashed on
    # arrival_times.all(); treat None like "no valid arrival times given"
    # so ray-tracing arrivals are used downstream (NaN triggers fallback)
    if arrival_times is None or not arrival_times.all():
        arrival_times = num.zeros((len(targets)), dtype=tconfig.floatX)
        arrival_times[:] = num.nan

    taperers = []
    tapp = taperers.append
    for i, target in enumerate(targets):
        if arrival_taper:
            tapp(get_phase_taperer(
                engine=engine,
                source=sources[0],
                wavename=wavename,
                target=target,
                arrival_taper=arrival_taper,
                arrival_time=arrival_times[i]))

    if pre_stack_cut and arrival_taper and outmode != 'data':
        # restrict the GF traces to the taper window before processing
        for t, taperer in zip(targets, taperers):
            t.update_target_times(sources, taperer)

    t_2 = time()
    try:
        response = engine.process(
            sources=sources,
            targets=targets, nprocs=nprocs)
        t_1 = time()
    except IndexError:
        for source in sources:
            print(source)
        raise ValueError('The GF store returned an empty trace!')

    logger.debug('Synthetics generation time: %f' % (t_1 - t_2))
    # logger.debug('Details: %s \n' % response.stats)

    nt = len(targets)
    ns = len(sources)

    t0 = time()
    synt_trcs = []
    sapp = synt_trcs.append
    # results iterate targets-major per source; map each result back
    # to its target's taperer
    taper_index = [j for _ in range(ns) for j in range(nt)]

    for i, (source, target, tr) in enumerate(response.iter_results()):
        if arrival_taper:
            taper = taperers[taper_index[i]]
        else:
            taper = None

        tr = post_process_trace(
            trace=tr,
            taper=taper,
            filterer=filterer,
            taper_tolerance_factor=taper_tolerance_factor,
            outmode=outmode,
            chop_bounds=chop_bounds,
            transfer_function=target.response)
        sapp(tr)

    t1 = time()
    logger.debug('Post-process time %f' % (t1 - t0))
    if plot:
        trace.snuffle(synt_trcs)

    if arrival_taper and outmode != 'data':
        try:
            synths = num.vstack([trc.ydata for trc in synt_trcs])
        except ValueError:
            # dump diagnostics before bailing out
            lengths = [trc.ydata.size for trc in synt_trcs]
            tmins = num.array([trc.tmin for trc in synt_trcs])
            tmaxs = num.array([trc.tmax for trc in synt_trcs])
            durations = tmaxs - tmins  # before normalising tmins
            tmins -= tmins.min()
            print('lengths', lengths)
            print('tmins', tmins)
            # fixed: used to print tmins under the 'tmaxs' label and
            # durations computed after the tmins normalisation
            print('tmaxs', tmaxs)
            print('duration', durations)
            print('arrival_times', arrival_times)
            print('arrival_times norm', arrival_times - arrival_times.min())
            trace.snuffle(synt_trcs)
            raise ValueError('Stacking error, traces different lengths!')

        # stack traces for all sources
        t6 = time()
        if ns == 1:
            outstack = synths
        else:
            outstack = num.zeros([nt, synths.shape[1]])
            for k in range(ns):
                outstack += synths[(k * nt):(k + 1) * nt, :]

        t7 = time()
        logger.debug('Stack traces time %f' % (t7 - t6))

        # get taper times for tapering data as well
        tmins = num.array([getattr(at, chop_bounds[0]) for at in taperers])
    else:
        # no taper defined so return trace tmins
        tmins = num.array([trc.tmin for trc in synt_trcs])

    if outmode == 'stacked_traces':
        if arrival_taper:
            outtraces = []
            oapp = outtraces.append
            for i in range(nt):
                synt_trcs[i].ydata = outstack[i, :]
                oapp(synt_trcs[i])

            return outtraces, tmins
        else:
            raise TypeError(
                'arrival taper has to be defined for %s type!' % outmode)

    elif outmode == 'data':
        return synt_trcs, tmins

    elif outmode == 'tapered_data':
        outlist = [[] for i in range(nt)]
        for i, tr in enumerate(synt_trcs):
            outlist[taper_index[i]].append(tr)

        return outlist, tmins

    elif outmode == 'array':
        logger.debug('Returning...')
        return outstack, tmins
    else:
        raise TypeError('Outmode %s not supported!' % outmode)
def geo_synthetics(
        engine, targets, sources, outmode='stacked_array', plot=False,
        nprocs=1):
    """
    Calculate synthetic surface displacements for a given static fomosto
    Greens Function database, for sources and targets on the earths surface.

    Parameters
    ----------
    engine : :class:`pyrocko.gf.seismosizer.LocalEngine`
    sources : list
        containing :class:`pyrocko.gf.seismosizer.Source` Objects
        reference source is the first in the list!!!
    targets : list
        containing :class:`pyrocko.gf.seismosizer.Target` Objects
    plot : boolean
        flag for looking at synthetics - not implemented yet
    nprocs : int
        number of processors to use for synthetics calculation
        --> currently no effect !!!
    outmode : string
        output format of synthetics can be: 'array', 'arrays',
        'stacked_array', 'stacked_arrays'

    Returns
    -------
    depends on outmode:
    'stacked_array'
    :class:`numpy.ndarray` (n_observations; ux-North, uy-East, uz-Down)
    'stacked_arrays'
    or list of
    :class:`numpy.ndarray` (target.samples; ux-North, uy-East, uz-Down)
    """
    response = engine.process(sources, targets)
    nsources = len(sources)
    ntargets = len(targets)

    # one (npoints, 3) array per (source, target) combination; the order of
    # static_results() is source-major: index = itarget + isource * ntargets
    displacements = []
    for static_result in response.static_results():
        res = static_result.result
        displacements.append(
            num.vstack([
                res['displacement.n'],
                res['displacement.e'],
                -res['displacement.d']]).T)   # flip down to up

    def sum_over_sources():
        # accumulate the contribution of every source, per target
        summed = [num.zeros([target.lons.size, 3]) for target in targets]
        for isource in range(nsources):
            for itarget in range(ntargets):
                summed[itarget] += displacements[itarget + isource * ntargets]
        return summed

    if outmode == 'arrays':
        return displacements
    elif outmode == 'array':
        return num.vstack(displacements)
    elif outmode == 'stacked_arrays':
        return sum_over_sources()
    elif outmode == 'stacked_array':
        return num.vstack(sum_over_sources())
    else:
        raise ValueError('Outmode %s not available' % outmode)
def taper_filter_traces(
        traces, arrival_taper=None, filterer=None, deltat=None,
        arrival_times=None, plot=False, outmode='array',
        taper_tolerance_factor=0., chop_bounds=['b', 'c']):
    """
    Taper and filter data_traces according to given taper and filterers.
    Tapering will start at the given tmin.

    Parameters
    ----------
    traces : List
        containing :class:`pyrocko.trace.Trace` objects
    arrival_taper : :class:`ArrivalTaper`
    filterer : list
        of :class:`Filterer`
    deltat : float
        if set data is downsampled to that sampling interval
    arrival_times : list or:class:`numpy.ndarray`
        containing the start times [s] since 1st.January 1970 to start
        tapering
    outmode : str
        defines the output structure, options: "stacked_traces", "array",
        "data"
    taper_tolerance_factor : float
        tolerance to chop traces around taper.a and taper.d
    chop_bounds : list of len 2
        of taper attributes a, b, c, or d

    Returns
    -------
    :class:`numpy.ndarray`
        with tapered and filtered data traces, rows different traces,
        columns temporal values
    """
    processed = []
    for idx, raw_tr in enumerate(traces):
        # work on a copy, the input traces must stay untouched
        tr = raw_tr.copy()
        tr.set_location('f')

        if arrival_taper:
            taper = arrival_taper.get_pyrocko_taper(float(arrival_times[idx]))
        else:
            taper = None

        logger.debug(
            'Filtering, tapering, chopping ... '
            'trace_samples: %i' % tr.ydata.size)

        processed.append(post_process_trace(
            trace=tr,
            taper=taper,
            filterer=filterer,
            deltat=deltat,
            taper_tolerance_factor=taper_tolerance_factor,
            outmode=outmode,
            chop_bounds=chop_bounds))

    if plot:
        trace.snuffle(processed + traces)

    if outmode != 'array':
        return processed

    if not arrival_taper:
        raise IOError('Cannot return array without tapering!')

    logger.debug('Returning chopped traces ...')
    try:
        return num.vstack([ptr.ydata for ptr in processed])
    except ValueError:
        raise ValueError(
            'Traces have different length, cannot return array!')
def velocities_from_pole(
        lats, lons, pole_lat, pole_lon, omega, earth_shape='ellipsoid'):
    """
    Return horizontal velocities at input locations for rotation around
    given Euler pole

    Parameters
    ----------
    lats: :class:`numpy.NdArray`
        of geographic latitudes [deg] of points to calculate velocities for
    lons: :class:`numpy.NdArray`
        of geographic longitudes [deg] of points to calculate velocities for
    pole_lat: float
        Euler pole latitude [deg]
    pole_lon: float
        Euler pole longitude [deg]
    omega: float
        angle of rotation around Euler pole [deg / million yrs]
    earth_shape: str
        'ellipsoid' (default) to use ECEF coordinates, or 'sphere' for a
        spherical earth

    Returns
    -------
    :class:`numpy.NdArray` of velocities [m / yrs] npoints x 3 (NEU)
    """
    r_earth = orthodrome.earthradius

    def cartesian_to_local(lat, lon):
        # rotation matrices from geocentric Cartesian to local NEU frames,
        # one per input point (stacked along the last axis)
        rlat = lat * d2r
        rlon = lon * d2r
        return num.array([
            [-num.sin(rlat) * num.cos(rlon), -num.sin(rlat) * num.sin(rlon),
             num.cos(rlat)],
            [-num.sin(rlon), num.cos(rlon), num.zeros_like(rlat)],
            [-num.cos(rlat) * num.cos(rlon), -num.cos(rlat) * num.sin(rlon),
             -num.sin(rlat)]])

    npoints = lats.size
    # unit-sphere position vectors of the points and the pole
    if earth_shape == 'sphere':
        latlons = num.atleast_2d(num.vstack([lats, lons]).T)
        platlons = num.hstack([pole_lat, pole_lon])
        xyz_points = orthodrome.latlon_to_xyz(latlons)
        xyz_pole = orthodrome.latlon_to_xyz(platlons)

    elif earth_shape == 'ellipsoid':
        # normalize ECEF coordinates by the earth radius
        xyz = orthodrome.geodetic_to_ecef(lats, lons, num.zeros_like(lats))
        xyz_points = num.atleast_2d(num.vstack(xyz).T) / r_earth
        xyz_pole = num.hstack(
            orthodrome.geodetic_to_ecef(pole_lat, pole_lon, 0.)) / r_earth

    # deg/Myr -> rad/yr, scaled back to linear velocity on the earth surface
    omega_rad_yr = omega * 1e-6 * d2r * r_earth
    xyz_poles = num.tile(xyz_pole, npoints).reshape(npoints, 3)

    # v = omega x r
    v_vecs = num.cross(xyz_poles, xyz_points)
    vels_cartesian = omega_rad_yr * v_vecs

    # project Cartesian velocities into the local NEU frame of each point
    T = cartesian_to_local(lats, lons)
    return num.einsum('ijk->ik', T * vels_cartesian.T).T
class StrainRateTensor(Object):
    """
    2d horizontal area strain-rate tensor [nanostrain] with a clockwise
    rotation-rate component.
    """
    exx = Float.T(default=10)       # strain rate, x-North component
    eyy = Float.T(default=0)        # strain rate, y-East component
    exy = Float.T(default=0)        # shear strain-rate component
    rotation = Float.T(default=0)   # clockwise rotation rate

    @staticmethod
    def from_point(point):
        """
        Instantiate from a solver point: dict of variable name -> value.

        Note: declared as a staticmethod; the original bare function broke
        when accessed through an instance.
        """
        kwargs = {varname: float(rv) for varname, rv in point.items()}
        return StrainRateTensor(**kwargs)

    @property
    def m4(self):
        # full 2x2 velocity-gradient tensor including the antisymmetric
        # rotation part
        return num.array([
            [self.exx, 0.5 * (self.exy + self.rotation)],
            [0.5 * (self.exy - self.rotation), self.eyy]])

    @property
    def shear_strain_rate(self):
        # radius of Mohr's circle: sqrt(((exx - eyy)/2)^2 + exy^2)
        return float(
            0.5 * num.sqrt((self.exx - self.eyy) ** 2 + 4 * self.exy ** 2))

    @property
    def eps1(self):
        """
        Maximum extension eigenvalue of strain rate tensor, extension positive.
        """
        return float(0.5 * (self.exx + self.eyy) + self.shear_strain_rate)

    @property
    def eps2(self):
        """
        Maximum compression eigenvalue of strain rate tensor,
        extension positive.
        """
        return float(0.5 * (self.exx + self.eyy) - self.shear_strain_rate)

    @property
    def azimuth(self):
        """
        Direction of eps2 compared towards North [deg].
        """
        # principal-axis direction: tan(2*theta) = 2*exy / (exx - eyy).
        # Fixed: the original divided by (self.exx - self.exy), mixing a
        # normal with a shear component.
        return float(
            0.5 * r2d * num.arctan(2 * self.exy / (self.exx - self.eyy)))
def velocities_from_strain_rate_tensor(
        lats, lons, exx=0., eyy=0., exy=0., rotation=0.):
    """
    Get velocities [m] from 2d area strain rate tensor.

    Geographic coordinates are reprojected internally wrt. the centroid of the
    input locations.

    Parameters
    ----------
    lats : array-like :class:`numpy.ndarray`
        geographic latitudes in [deg]
    lons : array-like :class:`numpy.ndarray`
        geographic longitudes in [deg]
    exx : float
        component of the 2d area strain-rate tensor [nanostrain] x-North
    eyy : float
        component of the 2d area strain-rate tensor [nanostrain] y-East
    exy : float
        component of the 2d area strain-rate tensor [nanostrain]
    rotation : float
        clockwise rotation rate around the centroid of input locations

    Returns
    -------
    v_xyz: 2d array-like :class:`numpy.ndarray`
        Deformation rate in [m] in x - East, y - North, z - Up Direction
    """
    # velocity-gradient tensor: symmetric strain plus antisymmetric rotation
    strain = num.array([
        [float(exx), 0.5 * float(exy + rotation)],
        [0.5 * float(exy - rotation), float(eyy)]]) * nanostrain

    # local Cartesian offsets of each point relative to the centroid
    centroid_lat, centroid_lon = orthodrome.geographic_midpoint(lats, lons)
    norths, easts = orthodrome.latlon_to_ne_numpy(
        centroid_lat, centroid_lon, lats, lons)

    # velocity = tensor . offset, applied to all points at once
    vx, vy = strain.dot(num.atleast_2d(num.vstack([norths, easts])))

    velocities = num.zeros((lats.size, 3))
    velocities[:, 0] = vx
    velocities[:, 1] = vy
    return velocities
def get_ramp_displacement(locx, locy, azimuth_ramp, range_ramp, offset):
    """
    Get synthetic residual plane in azimuth and range direction of the
    satellite.

    Parameters
    ----------
    locx : shared array-like :class:`numpy.ndarray`
        local coordinates [km] in east direction
    locy : shared array-like :class:`numpy.ndarray`
        local coordinates [km] in north direction
    azimuth_ramp : :class:`theano.tensor.Tensor` or :class:`numpy.ndarray`
        vector with ramp parameter in azimuth
    range_ramp : :class:`theano.tensor.Tensor` or :class:`numpy.ndarray`
        vector with ramp parameter in range
    offset : :class:`theano.tensor.Tensor` or :class:`numpy.ndarray`
        scalar of offset in [m]
    """
    # plane through the data: constant offset plus linear ramps in both
    # satellite directions
    return offset + range_ramp * locx + azimuth_ramp * locy
def check_problem_stores(problem, datatypes):
    """
    Check GF stores for empty traces.

    Iterates over the engines of the problem composites for the given
    datatypes and collects the ids of all stores that report empty traces
    in their statistics.

    Returns
    -------
    dict
        mapping datatype -> list of store ids with empty traces
    """
    logger.info('Checking stores for empty traces ...')
    corrupted_stores = {}
    for datatype in datatypes:
        engine = problem.composites[datatype].engine
        storeids = engine.get_store_ids()

        cstores = []
        for store_id in storeids:
            store = engine.get_store(store_id)
            stats = store.stats()
            if stats['empty'] > 0:
                cstores.append(store_id)

            # free file handles after each store check
            # (note: 'cashed' spelling matches the pyrocko engine API)
            engine.close_cashed_stores()

        corrupted_stores[datatype] = cstores

    return corrupted_stores
| gpl-3.0 |
arabenjamin/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg

# NOTE(review): sklearn.datasets.samples_generator is a legacy import path in
# old scikit-learn versions — modern releases expose make_regression directly
# from sklearn.datasets; verify against the pinned sklearn version.
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso


###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")

# dense design matrix and its sparse (COO) copy — same values, two layouts
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)

alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)

# time the sparse-input code path on dense data
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))

# time the dense-input code path
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))

# both fits should produce (numerically) the same coefficients
print("Distance between coefficients : %s"
      % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))

###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")

# sparsify the data by zeroing most entries, then convert to CSC format
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()

print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))

alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)

t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))

t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))

print("Distance between coefficients : %s"
      % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
beiko-lab/gengis | bin/Lib/site-packages/mpl_toolkits/axes_grid1/axes_divider.py | 4 | 29599 | """
The axes_divider module provide helper classes to adjust the positions of
multiple axes at the drawing time.
Divider: this is the class that is used calculates the axes
position. It divides the given rectangular area into several sub
rectangles. You initialize the divider by setting the horizontal
and vertical list of sizes that the division will be based on. You
then use the new_locator method, whose return value is a callable
object that can be used to set the axes_locator of the axes.
"""
import matplotlib.transforms as mtransforms
from matplotlib.axes import SubplotBase
import axes_size as Size
class Divider(object):
    """
    This is the class that is used calculates the axes position. It
    divides the given rectangular area into several
    sub-rectangles. You initialize the divider by setting the
    horizontal and vertical lists of sizes
    (:mod:`mpl_toolkits.axes_grid.axes_size`) that the division will
    be based on. You then use the new_locator method to create a
    callable object that can be used to as the axes_locator of the
    axes.
    """

    def __init__(self, fig, pos, horizontal, vertical, aspect=None, anchor="C"):
        """
        :param fig: matplotlib figure
        :param pos: position (tuple of 4 floats) of the rectangle that
            will be divided.
        :param horizontal: list of sizes
            (:mod:`~mpl_toolkits.axes_grid.axes_size`)
            for horizontal division
        :param vertical: list of sizes
            (:mod:`~mpl_toolkits.axes_grid.axes_size`)
            for vertical division
        :param aspect: if True, the overall rectangular area is reduced
            so that the relative part of the horizontal and
            vertical scales have same scale.
        :param anchor: Determine how the reduced rectangle is placed
            when aspect is True.
        """
        self._fig = fig
        self._pos = pos
        self._horizontal = horizontal
        self._vertical = vertical
        self._anchor = anchor
        self._aspect = aspect
        # index of the reference cell; prepending sizes shifts these
        self._xrefindex = 0
        self._yrefindex = 0
        self._locator = None

    def get_horizontal_sizes(self, renderer):
        return [s.get_size(renderer) for s in self.get_horizontal()]

    def get_vertical_sizes(self, renderer):
        return [s.get_size(renderer) for s in self.get_vertical()]

    def get_vsize_hsize(self):
        # combined total sizes as AddList size objects
        from axes_size import AddList

        vsize = AddList(self.get_vertical())
        hsize = AddList(self.get_horizontal())

        return vsize, hsize

    @staticmethod
    def _calc_k(l, total_size):
        # each size is a (relative, absolute) pair; solve for the factor k
        # scaling the relative parts so everything fits in total_size
        rs_sum, as_sum = 0., 0.

        for _rs, _as in l:
            rs_sum += _rs
            as_sum += _as

        if rs_sum != 0.:
            k = (total_size - as_sum) / rs_sum
            return k
        else:
            return 0.

    @staticmethod
    def _calc_offsets(l, k):
        # cumulative cell-boundary offsets given the scale factor k
        offsets = [0.]

        for _rs, _as in l:
            offsets.append(offsets[-1] + _rs*k + _as)

        return offsets

    def set_position(self, pos):
        """
        set the position of the rectangle.

        :param pos: position (tuple of 4 floats) of the rectangle that
            will be divided.
        """
        self._pos = pos

    def get_position(self):
        "return the position of the rectangle."
        return self._pos

    def set_anchor(self, anchor):
        """
        :param anchor: anchor position

          =====  ============
          value  description
          =====  ============
          'C'    Center
          'SW'   bottom left
          'S'    bottom
          'SE'   bottom right
          'E'    right
          'NE'   top right
          'N'    top
          'NW'   top left
          'W'    left
          =====  ============

        """
        if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
            self._anchor = anchor
        else:
            # fixed: the original referenced mtransforms.BBox (capital B),
            # which raised AttributeError instead of the intended ValueError
            raise ValueError('argument must be among %s' %
                             ', '.join(mtransforms.Bbox.coefs.keys()))

    def get_anchor(self):
        "return the anchor"
        return self._anchor

    def set_horizontal(self, h):
        """
        :param horizontal: list of sizes
            (:mod:`~mpl_toolkits.axes_grid.axes_size`)
            for horizontal division
        """
        self._horizontal = h

    def get_horizontal(self):
        "return horizontal sizes"
        return self._horizontal

    def set_vertical(self, v):
        """
        :param vertical: list of sizes
            (:mod:`~mpl_toolkits.axes_grid.axes_size`)
            for vertical division
        """
        self._vertical = v

    def get_vertical(self):
        "return vertical sizes"
        return self._vertical

    def set_aspect(self, aspect=False):
        """
        :param aspect: True or False
        """
        self._aspect = aspect

    def get_aspect(self):
        "return aspect"
        return self._aspect

    def set_locator(self, _locator):
        self._locator = _locator

    def get_locator(self):
        return self._locator

    def get_position_runtime(self, ax, renderer):
        # dynamic position from the installed locator, if any
        if self._locator is None:
            return self.get_position()
        else:
            return self._locator(ax, renderer).bounds

    def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
        """
        :param nx, nx1: Integers specifying the column-position of the
            cell. When nx1 is None, a single nx-th column is
            specified. Otherwise location of columns spanning between nx
            to nx1 (but excluding nx1-th column) is specified.
        :param ny, ny1: same as nx and nx1, but for row positions.
        """
        figW, figH = self._fig.get_size_inches()
        x, y, w, h = self.get_position_runtime(axes, renderer)

        hsizes = self.get_horizontal_sizes(renderer)
        vsizes = self.get_vertical_sizes(renderer)
        k_h = self._calc_k(hsizes, figW*w)
        k_v = self._calc_k(vsizes, figH*h)

        if self.get_aspect():
            # use a common scale factor so that the relative parts of the
            # horizontal and vertical sizes have the same physical scale
            k = min(k_h, k_v)
            ox = self._calc_offsets(hsizes, k)
            oy = self._calc_offsets(vsizes, k)

            # the reduced area is anchored inside the original rectangle
            ww = (ox[-1] - ox[0])/figW
            hh = (oy[-1] - oy[0])/figH
            pb = mtransforms.Bbox.from_bounds(x, y, w, h)
            pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
            pb1_anchored = pb1.anchored(self.get_anchor(), pb)
            x0, y0 = pb1_anchored.x0, pb1_anchored.y0

        else:
            ox = self._calc_offsets(hsizes, k_h)
            oy = self._calc_offsets(vsizes, k_v)
            x0, y0 = x, y

        if nx1 is None:
            nx1 = nx+1
        if ny1 is None:
            ny1 = ny+1

        x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
        y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH

        return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)

    def new_locator(self, nx, ny, nx1=None, ny1=None):
        """
        returns a new locator
        (:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
        specified cell.

        :param nx, nx1: Integers specifying the column-position of the
            cell. When nx1 is None, a single nx-th column is
            specified. Otherwise location of columns spanning between nx
            to nx1 (but excluding nx1-th column) is specified.
        :param ny, ny1: same as nx and nx1, but for row positions.
        """
        return AxesLocator(self, nx, ny, nx1, ny1)

    def append_size(self, position, size):
        # prepending shifts the reference index so existing locators,
        # which store positions relative to it, keep pointing to the
        # same cells
        if position == "left":
            self._horizontal.insert(0, size)
            self._xrefindex += 1
        elif position == "right":
            self._horizontal.append(size)
        elif position == "bottom":
            self._vertical.insert(0, size)
            self._yrefindex += 1
        elif position == "top":
            self._vertical.append(size)
        else:
            raise ValueError("the position must be one of left, right, bottom, or top")

    def add_auto_adjustable_area(self,
                                 use_axes, pad=0.1,
                                 adjust_dirs=["left", "right", "bottom", "top"],
                                 ):
        # reserve space on each side large enough for the tick labels etc.
        # of use_axes, plus pad inches
        from axes_size import Padded, SizeFromFunc, GetExtentHelper
        for d in adjust_dirs:
            helper = GetExtentHelper(use_axes, d)
            size = SizeFromFunc(helper)
            padded_size = Padded(size, pad)  # pad in inch
            self.append_size(d, padded_size)
class AxesLocator(object):
    """
    A simple callable object, initialized with AxesDivider class,
    returns the position and size of the given cell.
    """
    def __init__(self, axes_divider, nx, ny, nx1=None, ny1=None):
        """
        :param axes_divider: An instance of AxesDivider class.
        :param nx, nx1: Integers specifying the column-position of the
            cell. When nx1 is None, a single nx-th column is
            specified. Otherwise location of columns spanning between nx
            to nx1 (but excluding nx1-th column) is specified.
        :param ny, ny1: same as nx and nx1, but for row positions.
        """
        self._axes_divider = axes_divider

        # store positions relative to the divider's reference cell, so
        # that sizes later prepended to the divider do not shift them
        _xrefindex = axes_divider._xrefindex
        _yrefindex = axes_divider._yrefindex

        self._nx, self._ny = nx - _xrefindex, ny - _yrefindex

        if nx1 is None:
            nx1 = nx+1
        if ny1 is None:
            ny1 = ny+1

        self._nx1 = nx1 - _xrefindex
        self._ny1 = ny1 - _yrefindex

    def __call__(self, axes, renderer):
        # translate back to absolute cell indices at call time, using the
        # divider's current reference index
        _xrefindex = self._axes_divider._xrefindex
        _yrefindex = self._axes_divider._yrefindex

        return self._axes_divider.locate(self._nx + _xrefindex,
                                         self._ny + _yrefindex,
                                         self._nx1 + _xrefindex,
                                         self._ny1 + _yrefindex,
                                         axes,
                                         renderer)

    def get_subplotspec(self):
        # delegate to the divider when it knows about subplot geometry
        if hasattr(self._axes_divider, "get_subplotspec"):
            return self._axes_divider.get_subplotspec()
        else:
            return None
from matplotlib.gridspec import SubplotSpec, GridSpec
class SubplotDivider(Divider):
    """
    The Divider class whose rectangle area is specified as a subplot geometry.
    """

    def __init__(self, fig, *args, **kwargs):
        """
        *fig* is a :class:`matplotlib.figure.Figure` instance.

        *args* is the tuple (*numRows*, *numCols*, *plotNum*), where
        the array of subplots in the figure has dimensions *numRows*,
        *numCols*, and where *plotNum* is the number of the subplot
        being created.  *plotNum* starts at 1 in the upper left
        corner and increases to the right.

        If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
        decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.

        *args* may also be a single :class:`SubplotSpec` instance.
        """
        self.figure = fig

        if len(args)==1:
            if isinstance(args[0], SubplotSpec):
                self._subplotspec = args[0]
            else:
                try:
                    # MATLAB-style 3-digit shorthand, e.g. 221
                    s = str(int(args[0]))
                    rows, cols, num = map(int, s)
                except ValueError:
                    raise ValueError(
                        'Single argument to subplot must be a 3-digit integer')
                self._subplotspec = GridSpec(rows, cols)[num-1]
                # num - 1 for converting from MATLAB to python indexing
        elif len(args)==3:
            rows, cols, num = args
            rows = int(rows)
            cols = int(cols)
            if isinstance(num, tuple) and len(num) == 2:
                # (first, last) subplot numbers spanning several cells
                num = [int(n) for n in num]
                self._subplotspec = GridSpec(rows, cols)[num[0]-1:num[1]]
            else:
                self._subplotspec = GridSpec(rows, cols)[int(num)-1]
                # num - 1 for converting from MATLAB to python indexing
        else:
            raise ValueError('Illegal argument(s) to subplot: %s' % (args,))

        # compute self.figbox from the subplotspec
        self.update_params()

        pos = self.figbox.bounds
        horizontal = kwargs.pop("horizontal", [])
        vertical = kwargs.pop("vertical", [])
        aspect = kwargs.pop("aspect", None)
        anchor = kwargs.pop("anchor", "C")

        if kwargs:
            raise Exception("")

        Divider.__init__(self, fig, pos, horizontal, vertical,
                         aspect=aspect, anchor=anchor)

    def get_position(self):
        "return the bounds of the subplot box"

        self.update_params() # update self.figbox
        return self.figbox.bounds

    def update_params(self):
        'update the subplot position from fig.subplotpars'

        self.figbox = self.get_subplotspec().get_position(self.figure)

    def get_geometry(self):
        'get the subplot geometry, eg 2,2,3'
        rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
        return rows, cols, num1+1 # for compatibility

    # COVERAGE NOTE: Never used internally or from examples
    def change_geometry(self, numrows, numcols, num):
        'change subplot geometry, e.g., from 1,1,1 to 2,2,3'
        self._subplotspec = GridSpec(numrows, numcols)[num-1]
        self.update_params()
        self.set_position(self.figbox)

    def get_subplotspec(self):
        'get the SubplotSpec instance'
        return self._subplotspec

    def set_subplotspec(self, subplotspec):
        'set the SubplotSpec instance'
        self._subplotspec = subplotspec
class AxesDivider(Divider):
    """
    Divider based on the pre-existing axes.  The initial horizontal and
    vertical sizes are taken from the axes themselves; new axes can then
    be appended on any side with :meth:`append_axes`.
    """

    def __init__(self, axes, xref=None, yref=None):
        """
        :param axes: axes
        :param xref: reference size for fractional horizontal sizes
            (defaults to the width of *axes*)
        :param yref: reference size for fractional vertical sizes
            (defaults to the height of *axes*)
        """
        self._axes = axes
        # fixed idiom: compare to None with "is", not "==" (PEP 8)
        if xref is None:
            self._xref = Size.AxesX(axes)
        else:
            self._xref = xref
        if yref is None:
            self._yref = Size.AxesY(axes)
        else:
            self._yref = yref

        Divider.__init__(self, fig=axes.get_figure(), pos=None,
                         horizontal=[self._xref], vertical=[self._yref],
                         aspect=None, anchor="C")

    def _get_new_axes(self, **kwargs):
        # create a sibling axes of the same class as the main axes unless
        # an explicit axes_class was requested
        axes = self._axes

        axes_class = kwargs.pop("axes_class", None)

        if axes_class is None:
            if isinstance(axes, SubplotBase):
                axes_class = axes._axes_class
            else:
                axes_class = type(axes)

        ax = axes_class(axes.get_figure(),
                        axes.get_position(original=True), **kwargs)

        return ax

    def new_horizontal(self, size, pad=None, pack_start=False, **kwargs):
        """
        Add a new axes on the right (or left) side of the main axes.

        :param size: A width of the axes. A :mod:`~mpl_toolkits.axes_grid.axes_size`
            instance or if float or string is given, *from_any*
            function is used to create one, with *ref_size* set to AxesX instance
            of the current axes.
        :param pad: pad between the axes. It takes same argument as *size*.
        :param pack_start: If False, the new axes is appended at the end
            of the list, i.e., it became the right-most axes. If True, it is
            inserted at the start of the list, and becomes the left-most axes.

        All extra keywords arguments are passed to the created axes.
        If *axes_class* is given, the new axes will be created as an
        instance of the given class. Otherwise, the same class of the
        main axes will be used.
        """
        if pad:
            if not isinstance(pad, Size._Base):
                pad = Size.from_any(pad,
                                    fraction_ref=self._xref)
            if pack_start:
                self._horizontal.insert(0, pad)
                self._xrefindex += 1
            else:
                self._horizontal.append(pad)

        if not isinstance(size, Size._Base):
            size = Size.from_any(size,
                                 fraction_ref=self._xref)

        if pack_start:
            self._horizontal.insert(0, size)
            self._xrefindex += 1
            locator = self.new_locator(nx=0, ny=0)
        else:
            self._horizontal.append(size)
            locator = self.new_locator(nx=len(self._horizontal)-1, ny=0)

        ax = self._get_new_axes(**kwargs)
        ax.set_axes_locator(locator)

        return ax

    def new_vertical(self, size, pad=None, pack_start=False, **kwargs):
        """
        Add a new axes on the top (or bottom) side of the main axes.

        :param size: A height of the axes. A :mod:`~mpl_toolkits.axes_grid.axes_size`
            instance or if float or string is given, *from_any*
            function is used to create one, with *ref_size* set to AxesX instance
            of the current axes.
        :param pad: pad between the axes. It takes same argument as *size*.
        :param pack_start: If False, the new axes is appended at the end
            of the list, i.e., it became the top-most axes. If True, it is
            inserted at the start of the list, and becomes the bottom-most axes.

        All extra keywords arguments are passed to the created axes.
        If *axes_class* is given, the new axes will be created as an
        instance of the given class. Otherwise, the same class of the
        main axes will be used.
        """
        if pad:
            if not isinstance(pad, Size._Base):
                pad = Size.from_any(pad,
                                    fraction_ref=self._yref)
            if pack_start:
                self._vertical.insert(0, pad)
                self._yrefindex += 1
            else:
                self._vertical.append(pad)

        if not isinstance(size, Size._Base):
            size = Size.from_any(size,
                                 fraction_ref=self._yref)

        if pack_start:
            self._vertical.insert(0, size)
            self._yrefindex += 1
            locator = self.new_locator(nx=0, ny=0)
        else:
            self._vertical.append(size)
            locator = self.new_locator(nx=0, ny=len(self._vertical)-1)

        ax = self._get_new_axes(**kwargs)
        ax.set_axes_locator(locator)

        return ax

    def append_axes(self, position, size, pad=None, add_to_figure=True,
                    **kwargs):
        """
        create an axes at the given *position* with the same height
        (or width) of the main axes.

         *position*
           ["left"|"right"|"bottom"|"top"]

         *size* and *pad* should be axes_grid.axes_size compatible.
        """
        if position == "left":
            ax = self.new_horizontal(size, pad, pack_start=True, **kwargs)
        elif position == "right":
            ax = self.new_horizontal(size, pad, pack_start=False, **kwargs)
        elif position == "bottom":
            ax = self.new_vertical(size, pad, pack_start=True, **kwargs)
        elif position == "top":
            ax = self.new_vertical(size, pad, pack_start=False, **kwargs)
        else:
            raise ValueError("the position must be one of left, right, bottom, or top")

        if add_to_figure:
            self._fig.add_axes(ax)
        return ax

    def get_aspect(self):
        # fall back to the main axes' aspect setting if not explicitly set
        if self._aspect is None:
            aspect = self._axes.get_aspect()
            if aspect == "auto":
                return False
            else:
                return True
        else:
            return self._aspect

    def get_position(self):
        # fall back to the main axes' (original) position if not set
        if self._pos is None:
            bbox = self._axes.get_position(original=True)
            return bbox.bounds
        else:
            return self._pos

    def get_anchor(self):
        # fall back to the main axes' anchor if not explicitly set
        if self._anchor is None:
            return self._axes.get_anchor()
        else:
            return self._anchor

    def get_subplotspec(self):
        if hasattr(self._axes, "get_subplotspec"):
            return self._axes.get_subplotspec()
        else:
            return None
class HBoxDivider(SubplotDivider):
    """
    A :class:`SubplotDivider` that packs its cells side by side
    horizontally: all cells share one common height derived from the
    vertical sizes, while the horizontal sizes are appended in sequence.
    """

    def __init__(self, fig, *args, **kwargs):
        SubplotDivider.__init__(self, fig, *args, **kwargs)

    @staticmethod
    def _determine_karray(equivalent_sizes, appended_sizes,
                          max_equivalent_size,
                          total_appended_size):
        """
        Solve for per-cell scale factors k_i such that every cell reaches
        the same equivalent size H while the appended sizes sum to
        *total_appended_size*.  Each size is a (relative, absolute) pair.
        """
        n = len(equivalent_sizes)
        import numpy as np
        # linear system A @ [k_0, ..., k_{n-1}, H] = B:
        #   row i:    r_i * k_i - H = -a_i           (cell i reaches H)
        #   last row: sum(r_i * k_i) = total - sum(a_i)
        A = np.zeros((n+1, n+1), dtype="d")
        B = np.zeros((n+1), dtype="d")

        for i, (r, a) in enumerate(equivalent_sizes):
            A[i, i] = r
            A[i, -1] = -1
            B[i] = -a
        A[-1, :-1] = [r for r, a in appended_sizes]
        B[-1] = total_appended_size - sum([a for rs, a in appended_sizes])

        # np.linalg.solve replaces the deprecated np.mat / matrix-inverse
        # formulation; mathematically identical, numerically better
        karray_H = np.linalg.solve(A, B)
        karray = karray_H[:-1]
        H = karray_H[-1]

        if H > max_equivalent_size:
            # solved height would not fit; clamp each cell to the maximum
            # available equivalent size instead
            karray = ((max_equivalent_size -
                       np.array([a for r, a in equivalent_sizes])) /
                      np.array([r for r, a in equivalent_sizes]))
        return karray

    @staticmethod
    def _calc_offsets(appended_sizes, karray):
        # cumulative offsets of the cell boundaries
        offsets = [0.]
        for (r, a), k in zip(appended_sizes, karray):
            offsets.append(offsets[-1] + r*k + a)
        return offsets

    def new_locator(self, nx, nx1=None):
        """
        returns a new locator
        (:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
        specified cell.

        :param nx, nx1: Integers specifying the column-position of the
            cell. When nx1 is None, a single nx-th column is
            specified. Otherwise location of columns spanning between nx
            to nx1 (but excluding nx1-th column) is specified.
        """
        return AxesLocator(self, nx, 0, nx1, None)

    def _locate(self, x, y, w, h,
                y_equivalent_sizes, x_appended_sizes,
                figW, figH):
        """
        Compute the anchored origin, the horizontal cell offsets and the
        common cell height for the given rectangle (x, y, w, h).
        """
        equivalent_sizes = y_equivalent_sizes
        appended_sizes = x_appended_sizes

        max_equivalent_size = figH*h
        total_appended_size = figW*w
        karray = self._determine_karray(equivalent_sizes, appended_sizes,
                                        max_equivalent_size,
                                        total_appended_size)

        ox = self._calc_offsets(appended_sizes, karray)

        ww = (ox[-1] - ox[0])/figW
        ref_h = equivalent_sizes[0]
        hh = (karray[0]*ref_h[0] + ref_h[1])/figH
        pb = mtransforms.Bbox.from_bounds(x, y, w, h)
        pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
        pb1_anchored = pb1.anchored(self.get_anchor(), pb)
        x0, y0 = pb1_anchored.x0, pb1_anchored.y0

        return x0, y0, ox, hh

    def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
        """
        :param nx, nx1: Integers specifying the column-position of the
            cell. When nx1 is None, a single nx-th column is
            specified. Otherwise location of columns spanning between nx
            to nx1 (but excluding nx1-th column) is specified.
        :param ny, ny1: same as nx and nx1, but for row positions.
        """
        figW, figH = self._fig.get_size_inches()
        x, y, w, h = self.get_position_runtime(axes, renderer)

        y_equivalent_sizes = self.get_vertical_sizes(renderer)
        x_appended_sizes = self.get_horizontal_sizes(renderer)
        x0, y0, ox, hh = self._locate(x, y, w, h,
                                      y_equivalent_sizes, x_appended_sizes,
                                      figW, figH)
        if nx1 is None:
            nx1 = nx+1

        x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
        y1, h1 = y0, hh

        return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
class VBoxDivider(HBoxDivider):
    """
    The Divider class whose rectangle area is specified as a subplot geometry.

    Vertical counterpart of :class:`HBoxDivider`: cells are stacked
    vertically and share a common width; implemented by delegating to
    :meth:`HBoxDivider._locate` with x/y arguments swapped.
    """

    def new_locator(self, ny, ny1=None):
        """
        returns a new locator
        (:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
        specified cell.

        :param ny, ny1: Integers specifying the row-position of the
            cell. When ny1 is None, a single ny-th row is
            specified. Otherwise location of rows spanning between ny
            to ny1 (but excluding ny1-th row) is specified.
        """
        return AxesLocator(self, 0, ny, None, ny1)

    def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
        """
        :param nx, nx1: ignored for a VBoxDivider (single column).
        :param ny, ny1: Integers specifying the row-position of the
            cell. When ny1 is None, a single ny-th row is
            specified. Otherwise location of rows spanning between ny
            to ny1 (but excluding ny1-th row) is specified.
        """
        figW,figH = self._fig.get_size_inches()
        x, y, w, h = self.get_position_runtime(axes, renderer)

        x_equivalent_sizes = self.get_horizontal_sizes(renderer)
        y_appended_sizes = self.get_vertical_sizes(renderer)

        # reuse the horizontal algorithm with axes swapped
        y0, x0, oy, ww = self._locate(y, x, h, w,
                                      x_equivalent_sizes, y_appended_sizes,
                                      figH, figW)
        if ny1 is None:
            ny1=ny+1

        x1, w1 = x0, ww
        y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH

        return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
class LocatableAxesBase:
    """
    Mixin that adds axes-locator support to an axes class.  Intended to be
    combined with a concrete axes class via :func:`locatable_axes_factory`,
    which supplies the ``_axes_class`` attribute used below.
    """
    def __init__(self, *kl, **kw):
        self._axes_class.__init__(self, *kl, **kw)

        self._locator = None
        # renderer remembered from the last draw, needed when the locator
        # is evaluated outside of a draw (in apply_aspect)
        self._locator_renderer = None

    def set_axes_locator(self, locator):
        self._locator = locator

    def get_axes_locator(self):
        return self._locator

    def apply_aspect(self, position=None):

        if self.get_axes_locator() is None:
            self._axes_class.apply_aspect(self, position)
        else:
            # let the locator determine the position before applying aspect
            pos = self.get_axes_locator()(self, self._locator_renderer)
            self._axes_class.apply_aspect(self, position=pos)

    def draw(self, renderer=None, inframe=False):

        self._locator_renderer = renderer

        self._axes_class.draw(self, renderer, inframe)
# Cache of generated Locatable* classes, keyed by the wrapped axes class.
_locatableaxes_classes = {}


def locatable_axes_factory(axes_class):
    """Return (and cache) a subclass of *axes_class* mixing in LocatableAxesBase."""
    try:
        return _locatableaxes_classes[axes_class]
    except KeyError:
        new_class = type("Locatable%s" % axes_class.__name__,
                         (LocatableAxesBase, axes_class),
                         {'_axes_class': axes_class})
        _locatableaxes_classes[axes_class] = new_class
        return new_class
#if hasattr(maxes.Axes, "get_axes_locator"):
# LocatableAxes = maxes.Axes
#else:
def make_axes_locatable(axes):
    """
    Attach an AxesDivider to *axes* and return it.

    If *axes* does not already support axes locators, its class is swapped
    for a Locatable* subclass generated by :func:`locatable_axes_factory`.
    """
    if not hasattr(axes, "set_axes_locator"):
        axes.__class__ = locatable_axes_factory(type(axes))

    divider = AxesDivider(axes)
    axes.set_axes_locator(divider.new_locator(nx=0, ny=0))
    return divider
def make_axes_area_auto_adjustable(ax,
                                   use_axes=None, pad=0.1,
                                   adjust_dirs=None):
    """
    Make the axes area of *ax* automatically adjust so that the
    decorations (labels, ticks) of *use_axes* fit within the figure.

    :param ax: the axes whose divider is adjusted
    :param use_axes: axes whose decorations determine the required area;
        defaults to *ax* itself
    :param pad: padding (in inches) added around the auto-adjusted area
    :param adjust_dirs: sides to adjust; defaults to all four sides.
        (Was a mutable default list - now created per call.)
    """
    if adjust_dirs is None:
        adjust_dirs = ["left", "right", "bottom", "top"]
    divider = make_axes_locatable(ax)
    if use_axes is None:
        use_axes = ax
    divider.add_auto_adjustable_area(use_axes=use_axes, pad=pad,
                                     adjust_dirs=adjust_dirs)
#from matplotlib.axes import Axes
from mpl_axes import Axes
# Default locatable axes class, built from the local mpl_axes.Axes.
LocatableAxes = locatable_axes_factory(Axes)
| gpl-3.0 |
mantidproject/mantid | qt/python/mantidqt/widgets/samplelogs/test/test_samplelogs_presenter.py | 3 | 4246 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
import matplotlib
matplotlib.use('Agg') # noqa: E402
import unittest
from unittest import mock
from mantidqt.widgets.samplelogs.model import SampleLogsModel
from mantidqt.widgets.samplelogs.presenter import SampleLogs
from mantidqt.widgets.samplelogs.view import SampleLogsView
class SampleLogsTest(unittest.TestCase):
    """Presenter tests for SampleLogs, driven entirely through mocked
    SampleLogsModel / SampleLogsView objects."""
    def setUp(self):
        # View mock: selected row 5 named "Speed5", experiment number 1.
        self.view = mock.Mock(spec=SampleLogsView)
        self.view.get_row_log_name = mock.Mock(return_value="Speed5")
        self.view.get_selected_row_indexes = mock.Mock(return_value=[5])
        self.view.get_exp = mock.Mock(return_value=1)
        # Model mock: plottable log with canned workspace and statistics.
        self.model = mock.Mock(spec=SampleLogsModel)
        self.model.get_ws = mock.Mock(return_value='ws')
        self.model.is_log_plottable = mock.Mock(return_value=True)
        self.model.get_statistics = mock.Mock(return_value=[1,2,3,4])
        self.model.get_exp = mock.Mock(return_value=0)
    def test_sampleLogs(self):
        # NOTE(review): the exact call_count values below encode the
        # presenter's internal call sequence; confirm against SampleLogs
        # before changing any of them.
        presenter = SampleLogs(None, model=self.model, view=self.view)
        # setup calls: construction wires the model's item model into the view
        self.assertEqual(self.view.set_model.call_count, 1)
        self.assertEqual(self.model.getItemModel.call_count, 1)
        # plot_logs: a plottable selected log is forwarded to the view
        # together with the workspace and experiment number
        presenter.plot_logs()
        self.model.is_log_plottable.assert_called_once_with("Speed5")
        self.assertEqual(self.model.get_ws.call_count, 1)
        self.view.plot_selected_logs.assert_called_once_with('ws', 0, [5])
        # update_stats: a single selected row shows statistics ...
        presenter.update_stats()
        self.assertEqual(self.model.get_statistics.call_count, 1)
        self.view.get_row_log_name.assert_called_with(5)
        self.view.set_statistics.assert_called_once_with([1,2,3,4])
        self.assertEqual(self.view.clear_statistics.call_count, 0)
        self.view.reset_mock()
        # ... while a multi-row selection clears them instead
        self.view.get_selected_row_indexes = mock.Mock(return_value=[2,5])
        presenter.update_stats()
        self.assertEqual(self.view.set_statistics.call_count, 0)
        self.assertEqual(self.view.clear_statistics.call_count, 1)
        # changeExpInfo: new experiment number is pushed to the model and
        # the previous row selection restored
        self.model.reset_mock()
        self.view.reset_mock()
        presenter.changeExpInfo()
        self.assertEqual(self.view.get_selected_row_indexes.call_count, 3)
        self.assertEqual(self.view.get_exp.call_count, 1)
        self.model.set_exp.assert_called_once_with(1)
        self.view.set_selected_rows.assert_called_once_with([2,5])
        # clicked (update): re-reads the selection
        self.model.reset_mock()
        self.view.reset_mock()
        presenter.update()
        self.assertEqual(self.view.get_selected_row_indexes.call_count, 3)
        # plot clicked: single click is ignored ...
        self.model.reset_mock()
        self.view.reset_mock()
        event = mock.Mock()
        type(event).dblclick = mock.PropertyMock(return_value=False)
        presenter.plot_clicked(event)
        self.assertEqual(self.view.get_selected_row_indexes.call_count, 0)
        self.assertEqual(self.view.get_row_log_name.call_count, 0)
        self.assertEqual(self.model.is_log_plottable.call_count, 0)
        self.assertEqual(self.view.new_plot_selected_logs.call_count, 0)
        # ... but a double click opens a new plot of the selected logs
        type(event).dblclick = mock.PropertyMock(return_value=True)
        presenter.plot_clicked(event)
        self.assertEqual(self.view.get_selected_row_indexes.call_count, 1)
        self.assertEqual(self.view.get_row_log_name.call_count, 2)
        self.assertEqual(self.model.is_log_plottable.call_count, 2)
        self.assertEqual(self.view.new_plot_selected_logs.call_count, 1)
        # double clicked on a row: shows the log named by the selection
        self.model.reset_mock()
        self.view.reset_mock()
        index = mock.Mock()
        index.row = mock.Mock(return_value=7)
        presenter.doubleClicked(index)
        self.assertEqual(self.view.get_row_log_name.call_count, 2)
        self.view.get_row_log_name.assert_called_with(5)
        self.model.get_log.assert_called_with("Speed5")
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
cuemacro/chartpy | chartpy/chartconstants.py | 1 | 11701 | __author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
ChartConstants
Has various constants for the charts
"""
import os
class ChartConstants(object):
###### automatically find the root of ChartPy
root_chartpy = os.path.dirname(os.path.dirname(os.path.abspath(__file__))).replace('\\', '/') + "/chartpy/"
root_dashboard = root_chartpy + "/dashboard/"
# Twitter settings (you need to set these up on Twitter)
TWITTER_APP_KEY = "x"
TWITTER_APP_SECRET = "x"
TWITTER_OAUTH_TOKEN = "x"
TWITTER_OAUTH_TOKEN_SECRET = "x"
# for plots
chartfactory_silent_display = False
if (chartfactory_silent_display == True):
import matplotlib
matplotlib.use('Agg')
chartfactory_default_engine = "matplotlib"
chartfactory_source = "Web"
chartfactory_brand_label = "chartpy"
chartfactory_display_source_label = True
chartfactory_display_brand_label = True
chartfactory_brand_color = "#C0C0C0"
chartfactory_default_stylesheet = "chartpy"
chartfactory_style_sheet = {"chartpy" : root_chartpy + "stylesheets/chartpy.mplstyle",
"chartpy-pyfolio": root_chartpy + "stylesheets/chartpy-pyfolio.mplstyle",
"538-chartpy" : root_chartpy + "stylesheets/538-chartpy.mplstyle",
"miletus-chartpy" : root_chartpy + "stylesheets/miletus-chartpy.mplstyle",
"ggplot-chartpy" : root_chartpy + "stylesheets/ggplot-chartpy.mplstyle",
"ggplot-traditional" : root_chartpy + "stylesheets/ggplot-traditional.mplstyle"}
chartfactory_scale_factor = 3
chartfactory_dpi = 100
chartfactory_width = 600
chartfactory_height = 400
chartfactory_bubble_size_scalar = 35
########## BOKEH SETTINGS
bokeh_font = 'open sans'
bokeh_font_style = "normal"
bokeh_palette = [ '#E24A33',
'#348ABD',
'#988ED5',
'#777777',
'#FBC15E',
'#8EBA42',
'#FFB5B8']
bokeh_plot_mode = 'offline_html' # 'offline_jupyter'
########## PLOTLY SETTINGS
plotly_world_readable = False
plotly_plot_mode = 'offline_html' # 'online', 'offline_jupyter'
plotly_palette = ['#E24A33',
'#348ABD',
'#988ED5',
'#777777',
'#FBC15E',
'#8EBA42',
'#FFB5B8']
plotly_webgl = False
plotly_helper = 'cufflinks' # cufflinks or plotly_express
########## API KEYS
# Plotly default username
plotly_default_username = 'abc'
# Plotly settings (username : api_key)
plotly_creds = {"abc" : "pass",
"xyz" : "pass"
}
plotly_sharing = 'private'
plotly_streaming_key = "x"
quandl_api_key = "x"
##### Colors for plotting
# 'red' : '#E24A33',
# 'blue' : '#348ABD',
# 'purple': '#988ED5',
# 'gray' : '#777777',
# 'yellow': '#FBC15E',
# 'green' : '#8EBA42',
# 'pink' : '#FFB5B8'
chartfactory_default_colormap = 'Blues'
# nicer than the default colors of matplotlib (fully editable!)
# list of colors from http://www.github.com/santosjorge/cufflinks project
# where I've overwritten some of the primary colours (with the above)
chartfactory_color_overwrites = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#348ABD', # '#3780bf',
'bluegray': '#565656',
'bluepurple': '#6432AB',
'blueviolet': '#8A2BE2',
'brick': '#E24A33',
'brightblue': '#0000FF',
'brightred': '#FF0000',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'charcoal': '#151516',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkgrey': '#A9A9A9',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkslategrey': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'grassgreen': '#32ab60',
'gray': '#777777', # '#808080',
'green': '#8EBA42', # '#008000',
'greenyellow': '#ADFF2F',
'grey': '#808080',
'grey01': '#0A0A0A',
'grey02': '#151516',
'grey03': '#1A1A1C',
'grey04': '#1E1E21',
'grey05': '#252529',
'grey06': '#36363C',
'grey07': '#3C3C42',
'grey08': '#434343',
'grey09': '#666570',
'grey10': '#666666',
'grey11': '#8C8C8C',
'grey12': '#C2C2C2',
'grey13': '#E2E2E2',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightpink2': '#fccde5',
'lightpurple': '#bc80bd',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgray': '#D3D3D3',
'lightgreen': '#90EE90',
'lightgrey': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#B0C4DE',
'lightteal': '#8dd3c7',
'lightyellow': '#FFFFE0',
'lightblue2': '#80b1d3',
'lightviolet': '#8476CA',
'lime': '#00FF00',
'lime2': '#8EBA42',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumgray' : '#656565',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'mustard': '#FBC15E',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FF9900', # '#ff9933',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleolive': '#b3de69',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'pearl': '#D9D9D9',
'pearl02': '#F5F6F9',
'pearl03': '#E1E5ED',
'pearl04': '#9499A3',
'pearl05': '#6F7B8B',
'pearl06': '#4D5663',
'peru': '#CD853F',
'pink': '#FFB5B8', #'#ff0088',
'pinksalmon': '#FFB5B8',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#988ED5', #'#800080',
'red': '#E24A33', #'#db4052',
'rose': '#FFC0CB',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#fb8072',
'sandybrown': '#FAA460',
'seaborn': '#EAE7E4',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'slategrey': '#708090',
'smurf': '#3E6FB0',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FBC15E', #'#ffff33',
'yellowgreen': '#9ACD32'
}
override_fields = {}
# or we can store credentials in a file "chartcred.py" in the same folder, which will overwrite the above
# eg. TWITTER_APP_KEY, TWITTER_APP_SECRET, TWITTER_OAUTH_TOKEN, TWITTER_TOKEN_SECRET
# overwrite field variables with those listed in ChartCred or we can pass through an dictionary to override any fields
def __init__(self, override_fields={}):
try:
from chartpy.chartcred import ChartCred
cred_keys = ChartCred.__dict__.keys()
for k in ChartConstants.__dict__.keys():
if k in cred_keys and '__' not in k:
setattr(ChartConstants, k, getattr(ChartCred, k))
except:
pass
# Store overrided fields
if override_fields == {}:
override_fields = ChartConstants.override_fields
else:
ChartConstants.override_fields = override_fields
for k in override_fields.keys():
if '__' not in k:
setattr(ChartConstants, k, override_fields[k]) | apache-2.0 |
pearpai/TensorFlow-action | action/demo5/captcha_image.py | 1 | 1686 | # coding=utf-8
from captcha.image import ImageCaptcha # pip install captcha
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import random
# ιͺθ―η δΈηε符, ε°±δΈη¨ζ±εδΊ
number = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
# alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
# 'v', 'w', 'x', 'y', 'z']
# ALPHABET = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
# 'V', 'W', 'X', 'Y', 'Z']
# ιͺθ―η δΈθ¬ι½ζ θ§ε€§ε°εοΌιͺθ―η ιΏεΊ¦captcha_sizeδΈͺε符
def random_captcha_text(char_set=number, captcha_size=4):
    """Return a list of ``captcha_size`` characters, each drawn uniformly
    at random (with replacement) from ``char_set``.

    :param char_set: sequence of candidate characters (defaults to digits)
    :param captcha_size: number of characters to draw
    :returns: list of single-character strings
    """
    # Comprehension replaces the manual append loop with its unused index.
    return [random.choice(char_set) for _ in range(captcha_size)]
# ηζε符对εΊηιͺθ―η
def gen_captcha_text_and_image(captcha_size=16, fonts=None):
    """Generate a random captcha string and its rendered image.

    :param captcha_size: number of characters in the captcha
        (default 16, matching the previous hard-coded value)
    :param fonts: optional list of TTF font paths for rendering; defaults
        to the previously hard-coded bundled font
    :returns: tuple (captcha_text, captcha_image) where captcha_image is a
        numpy array of the rendered picture
    """
    if fonts is None:
        # NOTE(review): absolute path only exists on the original author's
        # machine - pass `fonts` explicitly elsewhere.
        fonts = ["/Users/wuyunfeng/Documents/machine_learn/TensorFlow-action/action/demo5/font/Farrington-7B-Qiqi.ttf"]
    image = ImageCaptcha(fonts=fonts)
    captcha_text = ''.join(random_captcha_text(captcha_size=captcha_size))
    captcha = image.generate(captcha_text)
    # image.write(captcha_text, captcha_text + '.jpg')  # write to file
    captcha_image = Image.open(captcha)
    captcha_image = np.array(captcha_image)
    return captcha_text, captcha_image
if __name__ == '__main__':
    # Quick manual check: generate one captcha and display it with its text.
    text, image = gen_captcha_text_and_image()
    f = plt.figure()
    ax = f.add_subplot(111)
    ax.text(0.1, 0.9, text, ha='center', va='center', transform=ax.transAxes)
    # NOTE(review): Python 2 print statement - this file is not Python 3 ready
    print image.shape
    plt.imshow(image)
    plt.show()
| apache-2.0 |
carrillo/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
# Base data: three isotropic Gaussian blobs.
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters: k=2 requested although the data has 3 blobs.
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data: a linear transform stretches the blobs,
# violating k-means' implicit assumption of spherical clusters.
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance: blobs with unequal spread (std 1.0 / 2.5 / 0.5).
X_varied, y_varied = make_blobs(n_samples=n_samples,
                                cluster_std=[1.0, 2.5, 0.5],
                                random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs: 500/100/10 points per cluster; despite this,
# k-means still recovers intuitive clusters (the "good" case).
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
zuku1985/scikit-learn | examples/cluster/plot_cluster_comparison.py | 58 | 4681 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that needs this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)

# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
                                      noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None

# One colour letter per cluster label, tiled so any label count is covered.
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)

clustering_names = [
    'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
    'SpectralClustering', 'Ward', 'AgglomerativeClustering',
    'DBSCAN', 'Birch']

plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
                    hspace=.01)

plot_num = 1

# Renamed from `datasets` so the imported sklearn.datasets module
# is not shadowed.
toy_datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(toy_datasets):
    X, y = dataset
    # normalize dataset for easier parameter selection
    X = StandardScaler().fit_transform(X)

    # estimate bandwidth for mean shift
    bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)

    # connectivity matrix for structured Ward
    connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
    # make connectivity symmetric
    connectivity = 0.5 * (connectivity + connectivity.T)

    # create clustering estimators
    ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
    two_means = cluster.MiniBatchKMeans(n_clusters=2)
    ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
                                           connectivity=connectivity)
    spectral = cluster.SpectralClustering(n_clusters=2,
                                          eigen_solver='arpack',
                                          affinity="nearest_neighbors")
    dbscan = cluster.DBSCAN(eps=.2)
    affinity_propagation = cluster.AffinityPropagation(damping=.9,
                                                       preference=-200)
    average_linkage = cluster.AgglomerativeClustering(
        linkage="average", affinity="cityblock", n_clusters=2,
        connectivity=connectivity)
    birch = cluster.Birch(n_clusters=2)
    clustering_algorithms = [
        two_means, affinity_propagation, ms, spectral, ward, average_linkage,
        dbscan, birch]

    for name, algorithm in zip(clustering_names, clustering_algorithms):
        # fit and predict cluster memberships, timing the fit
        t0 = time.time()
        algorithm.fit(X)
        t1 = time.time()
        if hasattr(algorithm, 'labels_'):
            # np.int was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin int is the documented replacement.
            y_pred = algorithm.labels_.astype(int)
        else:
            y_pred = algorithm.predict(X)

        # plot
        plt.subplot(4, len(clustering_algorithms), plot_num)
        if i_dataset == 0:
            plt.title(name, size=18)
        plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)

        if hasattr(algorithm, 'cluster_centers_'):
            centers = algorithm.cluster_centers_
            center_colors = colors[:len(centers)]
            plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
        plt.xlim(-2, 2)
        plt.ylim(-2, 2)
        plt.xticks(())
        plt.yticks(())
        # fit time, bottom-right of each panel
        plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
                 transform=plt.gca().transAxes, size=15,
                 horizontalalignment='right')
        plot_num += 1

plt.show()
| bsd-3-clause |
robcarver17/pysystemtrade | systems/rawdata.py | 1 | 11935 | from copy import copy
import pandas as pd
from systems.stage import SystemStage
from syscore.objects import resolve_function
from systems.system_cache import input, diagnostic, output
from sysdata.sim.futures_sim_data import futuresSimData
from sysdata.config.configdata import Config
class RawData(SystemStage):
"""
A SystemStage that does some fairly common calculations before we do
forecasting and which gives access to some widely used methods.
This is optional; forecasts can go straight to system.data
The advantages of using RawData are:
- preliminary calculations that are reused can be cached, to
save time (eg volatility)
- preliminary calculations are available for inspection when
diagnosing what is going on
Name: rawdata
"""
    @property
    def name(self):
        # Stage name used for lookup/caching within the parent System.
        return "rawdata"
    @property
    def data_stage(self) -> futuresSimData:
        # Convenience accessor for the parent system's sim data object.
        return self.parent.data
    @property
    def config(self) -> Config:
        # Convenience accessor for the parent system's configuration.
        return self.parent.config
@input
def get_daily_prices(self, instrument_code) -> pd.Series:
"""
Gets daily prices
:param instrument_code: Instrument to get prices for
:type trading_rules: str
:returns: Tx1 pd.DataFrame
KEY OUTPUT
"""
self.log.msg(
"Calculating daily prices for %s" % instrument_code,
instrument_code=instrument_code,
)
dailyprice = self.data_stage.daily_prices(instrument_code)
return dailyprice
@input
def get_natural_frequency_prices(self, instrument_code: str) -> pd.Series:
self.log.msg(
"Retrieving natural prices for %s" % instrument_code,
instrument_code=instrument_code,
)
natural_prices = self.data_stage.get_raw_price(instrument_code)
return natural_prices
@input
def get_hourly_prices(self, instrument_code: str) -> pd.Series:
raw_prices = self.get_natural_frequency_prices(instrument_code)
hourly_prices = raw_prices.resample("1H").last()
return hourly_prices
@output()
def daily_denominator_price(self, instrument_code: str) -> pd.Series:
"""
Gets daily prices for use with % volatility
This won't always be the same as the normal 'price' which is normally a cumulated total return series
:param instrument_code: Instrument to get prices for
:type trading_rules: str
:returns: Tx1 pd.DataFrame
>>> from systems.tests.testdata import get_test_object
>>> from systems.basesystem import System
>>>
>>> (rawdata, data, config)=get_test_object()
>>> system=System([rawdata], data)
>>> system.rawdata.daily_denominator_price("EDOLLAR").head(2)
price
1983-09-26 71.241192
1983-09-27 71.131192
"""
dem_returns = self.get_daily_prices(instrument_code)
return dem_returns
@output()
def daily_returns(self, instrument_code: str) -> pd.Series:
"""
Gets daily returns (not % returns)
:param instrument_code: Instrument to get prices for
:type trading_rules: str
:returns: Tx1 pd.DataFrame
>>> from systems.tests.testdata import get_test_object
>>> from systems.basesystem import System
>>>
>>> (rawdata, data, config)=get_test_object()
>>> system=System([rawdata], data)
>>> system.rawdata.daily_returns("EDOLLAR").tail(2)
price
2015-12-10 -0.0650
2015-12-11 0.1075
"""
instrdailyprice = self.get_daily_prices(instrument_code)
dailyreturns = instrdailyprice.diff()
return dailyreturns
    @output()
    def daily_returns_volatility(self, instrument_code: str) -> pd.Series:
        """
        Gets volatility of daily returns (not % returns)

        This is done using a user defined function, resolved at runtime.
        We get the volatility configuration from:
          the configuration object
          or if not found, system.defaults.py

        The dict must contain a 'func' key; anything else is passed to the
        function as keyword arguments.

        :param instrument_code: Instrument to get prices for
        :type trading_rules: str

        :returns: Tx1 pd.DataFrame

        >>> from systems.tests.testdata import get_test_object
        >>> from systems.basesystem import System
        >>>
        >>> (rawdata, data, config)=get_test_object()
        >>> system=System([rawdata], data)
        >>> ## uses defaults
        >>> system.rawdata.daily_returns_volatility("EDOLLAR").tail(2)
                         vol
        2015-12-10  0.054145
        2015-12-11  0.058522
        >>>
        >>> from sysdata.configdata import Config
        >>> config=Config("systems.provided.example.exampleconfig.yaml")
        >>> system=System([rawdata], data, config)
        >>> system.rawdata.daily_returns_volatility("EDOLLAR").tail(2)
                         vol
        2015-12-10  0.054145
        2015-12-11  0.058522
        >>>
        >>> config=Config(dict(volatility_calculation=dict(func="sysquant.estimators.vol.robust_vol_calc", days=200)))
        >>> system2=System([rawdata], data, config)
        >>> system2.rawdata.daily_returns_volatility("EDOLLAR").tail(2)
                         vol
        2015-12-10  0.057946
        2015-12-11  0.058626
        """
        self.log.msg(
            "Calculating daily volatility for %s" % instrument_code,
            instrument_code=instrument_code,
        )
        dailyreturns = self.daily_returns(instrument_code)
        # copy() so popping 'func' below does not mutate the shared config
        volconfig = copy(self.config.volatility_calculation)
        # volconfig contains 'func' and some other arguments
        # we turn func which could be a string into a function, and then
        # call it with the other args
        volfunction = resolve_function(volconfig.pop("func"))
        vol = volfunction(dailyreturns, **volconfig)
        return vol
    @output()
    def get_daily_percentage_returns(self, instrument_code: str) -> pd.Series:
        """
        Get daily returns as a fraction of the denominator price.

        NOTE(review): despite the name this is NOT multiplied by 100,
        whereas get_daily_percentage_volatility is. Callers may rely on
        the fractional scale, so the inconsistency is only documented here.

        :param instrument_code: Instrument to get prices for
        :type trading_rules: str

        :returns: Tx1 pd.DataFrame
        """
        # denominator is forward filled so sparse price dates still divide
        denom_price = self.daily_denominator_price(instrument_code)
        num_returns = self.daily_returns(instrument_code)
        perc_returns = num_returns / denom_price.ffill()
        return perc_returns
    @output()
    def get_daily_percentage_volatility(self, instrument_code: str) -> pd.Series:
        """
        Get daily return volatility as a percentage of price (x100).

        Useful statistic, also used for some trading rules

        This is an optional subsystem; forecasts can go straight to system.data

        :param instrument_code: Instrument to get prices for
        :type trading_rules: str

        :returns: Tx1 pd.DataFrame

        >>> from systems.tests.testdata import get_test_object
        >>> from systems.basesystem import System
        >>>
        >>> (rawdata, data, config)=get_test_object()
        >>> system=System([rawdata], data)
        >>> system.rawdata.get_daily_percentage_volatility("EDOLLAR").tail(2)
                         vol
        2015-12-10  0.055281
        2015-12-11  0.059789
        """
        denom_price = self.daily_denominator_price(instrument_code)
        return_vol = self.daily_returns_volatility(instrument_code)
        # align both series onto the volatility index before dividing
        (denom_price, return_vol) = denom_price.align(return_vol, join="right")
        # expressed in percentage points (x100), unlike
        # get_daily_percentage_returns which stays fractional
        perc_vol = 100.0 * (return_vol / denom_price.ffill())
        return perc_vol
    @diagnostic()
    def get_daily_vol_normalised_returns(self, instrument_code: str) -> pd.Series:
        """
        Get daily returns normalised by recent vol

        Useful statistic, also used for some trading rules

        This is an optional subsystem; forecasts can go straight to system.data

        :param instrument_code: Instrument to get prices for
        :type trading_rules: str

        :returns: Tx1 pd.DataFrame

        >>> from systems.tests.testdata import get_test_object
        >>> from systems.basesystem import System
        >>>
        >>> (rawdata, data, config)=get_test_object()
        >>> system=System([rawdata], data)
        >>> system.rawdata.get_daily_vol_normalised_returns("EDOLLAR").tail(2)
                    norm_return
        2015-12-10    -1.219510
        2015-12-11     1.985413
        """
        self.log.msg(
            "Calculating normalised return for %s" % instrument_code,
            instrument_code=instrument_code,
        )
        # vol is lagged one day (shift(1)) so each day's return is scaled
        # by volatility known before that day - avoids lookahead bias
        returnvol = self.daily_returns_volatility(instrument_code).shift(1)
        dailyreturns = self.daily_returns(instrument_code)
        norm_return = dailyreturns / returnvol
        return norm_return
@diagnostic()
def get_cumulative_daily_vol_normalised_returns(self, instrument_code: str) -> pd.Series:
"""
Returns a cumulative normalised return. This is like a price, but with equal expected vol
Used for a few different trading rules
:param instrument_code: str
:return: pd.Series
"""
self.log.msg(
"Calculating cumulative normalised return for %s" %
instrument_code, instrument_code=instrument_code, )
norm_returns = self.get_daily_vol_normalised_returns(instrument_code)
cum_norm_returns = norm_returns.cumsum()
return cum_norm_returns
@diagnostic()
def _aggregate_daily_vol_normalised_returns_for_asset_class(self, asset_class: str) -> pd.Series:
"""
Average normalised returns across an asset class
:param asset_class: str
:return: pd.Series
"""
instruments_in_asset_class = self.data_stage.all_instruments_in_asset_class(
asset_class)
aggregate_returns_across_asset_class = [
self.get_daily_vol_normalised_returns(instrument_code)
for instrument_code in instruments_in_asset_class
]
aggregate_returns_across_asset_class = pd.concat(
aggregate_returns_across_asset_class, axis=1
)
# we don't ffill before working out the median as this could lead to
# bad data
median_returns = aggregate_returns_across_asset_class.median(axis=1)
return median_returns
@diagnostic()
def _by_asset_class_daily_vol_normalised_price_for_asset_class(self, asset_class: str) -> pd.Series:
"""
Price for an asset class, built up from cumulative returns
:param asset_class: str
:return: pd.Series
"""
norm_returns = self._aggregate_daily_vol_normalised_returns_for_asset_class(
asset_class)
norm_price = norm_returns.cumsum()
return norm_price
@output()
def normalised_price_for_asset_class(self, instrument_code: str) -> pd.Series:
"""
:param instrument_code:
:return:
"""
asset_class = self.data_stage.asset_class_for_instrument(
instrument_code)
normalised_price_for_asset_class = (
self._by_asset_class_daily_vol_normalised_price_for_asset_class(asset_class)
)
normalised_price_this_instrument = self.get_cumulative_daily_vol_normalised_returns(
instrument_code)
# Align for an easy life
# As usual forward fill at last moment
normalised_price_for_asset_class = normalised_price_for_asset_class.reindex(
normalised_price_this_instrument.index).ffill()
return normalised_price_for_asset_class
if __name__ == "__main__":
    import doctest
    # run the doctests embedded in the method docstrings above
    doctest.testmod()
| gpl-3.0 |
danielballan/vistools | vistools/qt_widgets.py | 1 | 3165 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
from . import images
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas # noqa
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar # noqa
from matplotlib.figure import Figure
from matplotlib.cm import datad
class Xsection_widget(FigureCanvas):
    """Qt canvas hosting a matplotlib figure that shows a cross-section
    view of one image via ``images.xsection_viewer``."""

    def __init__(self, init_image, parent=None):
        # Figure size is given in inches; tight_layout trims margins.
        self.fig = Figure((24, 24), tight_layout=True)
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        # The viewer object owns the axes and the displayed image.
        self.xsection = images.xsection_viewer(self.fig, init_image)
        # Allow the canvas to expand to fill whatever space the layout offers.
        FigureCanvas.setSizePolicy(self,
                                   QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
# Sorted list of all registered colormap names, used to populate the
# colormap combo-box.  sorted() returns a new list and also accepts the
# view object that dict.keys() returns under Python 3, where the original
# ``datad.keys(); .sort()`` pair would raise AttributeError.
_CMAPS = sorted(datad.keys())
class StackScanner(QtGui.QWidget):
    """Widget for scrubbing through a stack of images: a cross-section
    canvas plus a synchronised slider/spin-box frame selector and an
    editable colormap chooser."""

    def __init__(self, stack, page_size=10, parent=None):
        # stack: indexable sequence of images; stack[0] seeds the display.
        QtGui.QWidget.__init__(self, parent)
        layout = QtGui.QVBoxLayout()
        self._stack = stack
        self._len = len(stack)
        self.xsection = Xsection_widget(stack[0])
        # set up slider (one position per frame in the stack)
        self._slider = QtGui.QSlider(parent=self)
        self._slider.setRange(0, self._len - 1)
        self._slider.setTracking(True)
        self._slider.setSingleStep(1)
        self._slider.setPageStep(page_size)
        self._slider.valueChanged.connect(self.update_frame)
        self._slider.setOrientation(QtCore.Qt.Orientation.Horizontal)
        # and its spin box; the two-way connections keep slider and
        # spin box showing the same frame number
        self._spinbox = QtGui.QSpinBox(parent=self)
        self._spinbox.setRange(self._slider.minimum(), self._slider.maximum())
        self._spinbox.valueChanged.connect(self._slider.setValue)
        self._slider.valueChanged.connect(self._spinbox.setValue)
        self._slider.rangeChanged.connect(self._spinbox.setRange)
        # make slider layout
        slider_layout = QtGui.QHBoxLayout()
        slider_layout.addWidget(self._slider)
        slider_layout.addWidget(self._spinbox)
        # make cmap combo-box; editable so a name can be typed directly
        self._cm_cb = QtGui.QComboBox()
        self._cm_cb.setEditable(True)
        self._cm_cb.addItems(_CMAPS)
        # self._cm_cb.currentIndexChanged.connect(self.update_cmap)
        self._cm_cb.setEditText('gray')
        self._cm_cb.editTextChanged.connect(self.update_cmap)
        self.mpl_toolbar = NavigationToolbar(self.xsection, self)
        # add toolbar
        layout.addWidget(self.mpl_toolbar)
        # add main widget
        layout.addWidget(self.xsection)
        # add slider layout
        layout.addLayout(slider_layout)
        # add cmap selector
        layout.addWidget(self._cm_cb)
        self.setLayout(layout)

    @QtCore.Slot(int)
    def update_frame(self, n):
        # Show frame n of the stack in the cross-section viewer.
        self.xsection.xsection.update_image(self._stack[n])

    @QtCore.Slot(str)
    def update_cmap(self, cmap_name):
        # Partially-typed colormap names raise ValueError; ignore them
        # until the text matches a real colormap.
        try:
            self.xsection.xsection.update_colormap(cmap_name)
        except ValueError:
            pass
| bsd-3-clause |
sao-eht/lmtscripts | 2017/loc.py | 1 | 9254 | # 1mm localization and total power in dreampy
# 2015, 2016 LLB
import numpy
import matplotlib
import shutil
# matplotlib.use('agg')
from matplotlib import pylab, mlab, pyplot
import os
np = numpy
plt = pyplot
# plt.ion()
from argparse import Namespace
from glob import glob
import scipy.io
from scipy.signal import butter,lfilter,freqz
from scipy.interpolate import interp1d
from scipy.ndimage.filters import minimum_filter1d
from scipy.interpolate import UnivariateSpline
from matplotlib.mlab import griddata, psd
from datetime import datetime, timedelta
from scipy.optimize import fmin
def asec2rad(asec):
    """Convert an angle from arcseconds to radians."""
    degrees = asec / 3600.
    return degrees * 2 * np.pi / 360.
def rad2asec(rad):
    """Convert an angle from radians to arcseconds."""
    turns = rad / (2 * np.pi)
    return turns * 360. * 3600.
# linear detrend, use only edges
def detrend(x, ntaper=100):
    """Subtract the straight line through the mean of the first and last
    *ntaper* samples of *x*, then Hann-taper both edges to zero."""
    n = len(x)
    head_mean = np.mean(x[:ntaper])
    tail_mean = np.mean(x[-ntaper:])
    slope = (tail_mean - head_mean) / n
    out = x - (head_mean + slope * np.arange(n))
    window = np.hanning(2 * ntaper)
    out[:ntaper] *= window[:ntaper]
    out[-ntaper:] *= window[-ntaper:]
    return out
def model(x, y, x0=0, y0=0, fwhm=11.):
    """Unit-amplitude circular Gaussian beam centred at (x0, y0).

    x, y, x0, y0 are in radians; fwhm is in arcseconds (converted inline,
    same expression as asec2rad).  The 2.335 FWHM-to-sigma factor is kept
    as in the original.
    """
    fwhm_rad = fwhm * 2*np.pi / 3600. / 360.  # inline asec2rad(fwhm)
    sigma = fwhm_rad / 2.335
    r2 = (x - x0) ** 2 + (y - y0) ** 2
    return np.exp(-r2 / (2 * sigma ** 2))
# patch together many scans and try to align in time (to the sample -- to keep X and Y)
def mfilt(scans):
    """Stitch several saved scans into one time-aligned Namespace.

    Each scan is loaded from scandata/scan_<i>.mat, edge-detrended, then
    placed into a common uniformly-sampled time grid.  Gaps between scans
    are marked invalid (idx False, x/y set to inf).

    Returns a Namespace with fields t, a, b, x, y, idx, source, fs and
    fillfrac (fraction of usable, non-tapered samples).
    """
    aps = []
    bps = []
    xs = []
    ys = []
    ts = []
    ss = []
    fss = []
    ntaper = 100
    for i in sorted(scans):
        # custom
        scan = Namespace(**scipy.io.loadmat('scandata/scan_%d.mat' % i, squeeze_me=True))
        aps.append(detrend(scan.a, ntaper=ntaper))
        bps.append(detrend(scan.b, ntaper=ntaper))
        ts.append(scan.t + scan.t0)
        xs.append(scan.x)
        ys.append(scan.y)
        ss.append(scan.source)
        fss.append(scan.fs)
    # source name and sample rate taken from the first scan; presumably all
    # scans share them -- TODO confirm
    s = ss[0]
    fs = fss[0]
    t0 = ts[0][0]
    t1 = ts[-1][-1]
    tnew = np.arange(t0, t1+1./fs, 1./fs)
    # NOTE(review): np.bool is removed in modern NumPy; plain bool works
    idx = np.zeros(len(tnew), dtype=np.bool)
    x = np.zeros(len(tnew))
    y = np.zeros(len(tnew))
    a = np.zeros(len(tnew))
    b = np.zeros(len(tnew))
    for i in range(len(ts)):
        # NOTE(review): the hard-coded 50. assumes fs == 50 Hz -- confirm
        istart = int(np.round((ts[i][0] - t0) * 50.))
        idx[istart:istart+len(ts[i])] = True
        x[istart:istart+len(xs[i])] = xs[i][:len(x)-istart]
        y[istart:istart+len(ys[i])] = ys[i][:len(y)-istart]
        a[istart:istart+len(aps[i])] = aps[i][:len(a)-istart]
        b[istart:istart+len(bps[i])] = bps[i][:len(b)-istart]
    # mark unfilled gaps as invalid pointing
    x[~idx] = np.inf
    y[~idx] = np.inf
    fillfrac = float(np.sum(idx)-ntaper*len(scans)) / len(tnew)
    return Namespace(t=tnew, a=a, b=b, x=x, y=y, idx=idx, source=s, fs=fs, fillfrac=fillfrac)
# custom
def getmat(iobs):
    """Load one saved scan (scandata/scan_<iobs>.mat) as a Namespace."""
    raw = scipy.io.loadmat('scandata/scan_%d.mat' % iobs, squeeze_me=True)
    return Namespace(**raw)
def fitmodel(z, win=50., res=2., fwhm=11., channel='b'):
    """Matched-filter grid search for a Gaussian source in one channel.

    z: stitched scan Namespace from mfilt(); win/res: half-width and step
    of the search grid in arcseconds; fwhm: beam FWHM in arcseconds.
    Returns a Namespace with the search grid (xx, yy), snr map, amplitude
    map v (mV), and probability maps prob/pcum.
    """
    Fs = z.fs
    tp = z.__dict__[channel]
    # 512 is balance between freq resolution and averaging, good for 50 Hz
    # (p, f) = psd(tp, NFFT=1024, pad_to=4096, Fs=Fs) # unit variance -> PSD = 1 / Hz
    (p, f) = psd(tp, NFFT=1024, pad_to=4096) # unit variance -> PSD = 1 = variance of complex FFT (1/sqrt(N))
    # we will take out the 1/Hz normalization later, to get unit variance per complex data point
    if 'fillfrac' in z:
        p = p / z.fillfrac # account for zeros in stiched timeseries (otherwise 1)
    N = len(z.t) # original sequence length
    pad = 2**int(np.ceil(np.log2(N))) # pad length for efficient FFTs
    fac = np.zeros(pad)
    mpad = np.zeros(pad)
    bpad = np.zeros(pad)
    ipad = np.zeros(pad).astype(bool)
    # N=32768; xtest=0; ytest=0; win=10.; res=1.; fwhm=11.; channel='b'
    bpad[:N] = tp # fails if N = len(tp) ??
    B = np.fft.rfft(bpad).conj() # N factor goes into fft, ifft = 1/N * ..
    # fm = np.abs(np.fft.fftfreq(pad, d=1./Fs)[:1+pad/2])
    # fac = 1. / interp1d(f, p)(fm) / (Fs/2.) # 1/PSD for matched filter (double whiten), Fs/2 accounts for PSD normalization
    fm = np.abs(np.fft.fftfreq(pad, d=1./2.)[:1+pad/2]) # the default nyquist units
    fac = 1. / interp1d(f, p)(fm) # 1/PSD for matched filter (double whiten), 1. var => 1. fac (no change)
    # apply sqrt(fac) to freq domain normalizes var(x)=1 using any fft/ifft transform pair
    # 1. var => 1. var for 1/sqrt(N) normalization, so x[k] if properly whitened for 1/sqrt(N)
    fac[fm < 0.1 * (2./Fs)] = 0. # turn off low freqs below 0.1 Hz - just a guess
    # np.fft.irfft(B*np.sqrt(fac)) gives unit standard deviation timeseries
    # build the search grid in radians
    x = asec2rad(np.arange(-win, win+res, res))
    y = asec2rad(np.arange(-win, win+res, res))
    (xx, yy) = np.meshgrid(x, y) # search grid
    xr = xx.ravel()
    yr = yy.ravel()
    snrs = [] # signal-to-noise ratios
    norms = [] # sqrt of whitened matched filter signal power
    for (xtest, ytest) in zip(xr, yr):
        mpad[:N] = model(z.x, z.y, xtest, ytest, fwhm=fwhm) # model signal
        M = np.fft.rfft(mpad) # M big by sqrt(N) factor
        # take the real part of sum = 0.5 * ifft[0]
        norm = np.sqrt(np.sum(np.abs(M)**2 * fac)) # sqrt(N)/sqrt(2) factor total for norm
        norms.append(norm)
        # M=sqrt(N), B=sqrt(N), sum=1/2., norm=sqrt(N)/sqrt(2) => sqrt(N)/sqrt(2) SNR factor
        snrs.append(np.sum((M * B * fac).real) / norm)
    snr = np.array(snrs)
    snr[snr < 0] = 0.
    imax = np.argmax(snr) # maximum snr location
    snr = snr.reshape(xx.shape)
    isnr = np.argsort(snr.ravel())[::-1] # reverse sort high to low
    # snr_true => snr/np.sqrt(pad/2)
    # [snr_true*np.sqrt(pad/2)] / [norm*(sqrt(pad/2))] = htrue
    # snr_true is amplitude of signal in units of normalized filter, need to get in units of unnomralized filter
    prob = np.exp((snr.ravel()/np.sqrt(pad/2.))**2/2.)
    pcum = np.zeros_like(prob)
    pcum[isnr] = np.cumsum(prob[isnr])
    pcum = pcum.reshape(xx.shape) / np.sum(prob)
    xxa = xx * rad2asec(1.)
    yya = yy * rad2asec(1.)
    return Namespace(xx=xxa, yy=yya, snr=snr/np.sqrt(pad/2.), v=1e3*snr/np.array(norms).reshape(xx.shape), prob=prob, pcum=pcum)
def point(first, last=None, win=10., res=0.5, fwhm=11., channel='b', clf=True):
    """Run the matched-filter pointing fit over scans [first, last] and
    plot the SNR/amplitude map with confidence contours.

    win/res: half-width and step of the search grid in arcseconds;
    fwhm: beam FWHM in arcseconds; channel: detector channel name;
    clf: clear the current figure first.
    """
    # Fix: scipy submodules are not loaded by "import scipy.io"; make the
    # dependency on scipy.special.erf explicit so this works regardless of
    # what else has been imported.
    import scipy.special
    if last is None:
        last = first
    scans = range(first, last+1)
    z = mfilt(scans)
    if win is None:
        win = np.ceil(rad2asec(np.abs(np.min(z.x))))
    a = fitmodel(z, win=win, res=res, fwhm=fwhm, channel=channel)
    (xxa, yya, snr, v, prob, pcum) = (a.xx, a.yy, a.snr, a.v, a.prob, a.pcum)
    # single-argument print with parentheses behaves identically under
    # Python 2 and is valid Python 3
    print(np.max(snr))
    if clf:
        plt.clf()
    plt.pcolormesh(xxa, yya, 1e3*v)
    # plt.pcolormesh(xxa, yya, s**2)
    plt.colorbar()
    # 1/2/3-sigma cumulative-probability contours
    h1 = plt.contour(xxa, yya, pcum, scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), colors='blue', lw=2)
    # h1 = plt.contourf(xxa, yya, pcum, scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), cmap=plt.cm.get_cmap("Blues"))
    plt.plot(rad2asec(z.x), rad2asec(z.y), 'y-')
    # plt.gca().set_axis_bgcolor('black')
    plt.gca().set_axis_bgcolor('white')
    plt.grid(alpha=0.5)
    plt.ylim(-win, win)
    plt.xlim(-win, win)
    # annotate the best-fit position and amplitude
    imax = np.argmax(snr.ravel())
    (xmax, ymax) = (xxa.ravel()[imax], yya.ravel()[imax])
    plt.plot(xmax, ymax, 'y+', ms=11, mew=2)
    plt.text(-win, win, '[%.1f, %.1f]' % (xmax, ymax), va='top', ha='left', color='black')
    plt.text(win, win, '[%.2f mV]' % v.ravel()[imax], va='top', ha='right', color='black')
    if len(scans) == 1:
        plt.title("%s: %d" % (z.source, scans[0]))
    else:
        plt.title("%s: [%d - %d]" % (z.source, scans[0], scans[-1]))
def point2(first, last=None, win=10., res=0.5, fwhm=11., channel='b', clf=True):
    """Variant of point() with a centred x track, imshow-based amplitude
    map, and a 68%-area position error estimate.

    win/res: half-width and step of the search grid in arcseconds;
    fwhm: beam FWHM in arcseconds; channel: detector channel name;
    clf: clear the current figure first.
    """
    # Fix: scipy submodules are not loaded by "import scipy.io"; make the
    # dependency on scipy.special.erf explicit so this works regardless of
    # what else has been imported.
    import scipy.special
    if last is None:
        last = first
    scans = range(first, last+1)
    z = mfilt(scans)
    z.x = z.x - np.mean(z.x) # hack
    if win is None:
        win = np.ceil(rad2asec(np.abs(np.min(z.x))))
    a = fitmodel(z, win=win, res=res, fwhm=fwhm, channel=channel)
    (xxa, yya, snr, v, prob, pcum) = (a.xx, a.yy, a.snr, a.v, a.prob, a.pcum)
    # 68% confidence area -> effective radial position error
    n68 = len(pcum.ravel()) - np.sum(pcum.ravel() > 0.68268949213708585)
    a68 = n68 * (res**2)
    e68 = np.sqrt(res**2 + (a68 / np.pi))
    # amplitude mean/std over the 3-sigma credible region
    i3s = (pcum.ravel() < 0.99730020393673979)
    v3s = v.ravel()[i3s]
    p3s = prob.ravel()[i3s]
    vmean = np.sum(v3s * p3s) / np.sum(p3s) # expectation value of v3s
    v3s2 = (v3s - vmean)**2
    vstd = np.sqrt(np.sum(v3s2 * p3s) / np.sum(p3s)) # std
    # single-argument print with parentheses behaves identically under
    # Python 2 and is valid Python 3
    print(np.max(snr))
    if clf:
        plt.clf()
    plt.axis(aspect=1.0)
    # plt.pcolormesh(xxa, yya, v, cmap='afmhot_r')
    plt.imshow(v, extent=(-win-res/2., win+res/2., -win-res/2., win+res/2.), interpolation='nearest', origin='lower', cmap='afmhot_r')
    plt.plot(rad2asec(z.x), rad2asec(z.y), '-', color='violet', ls='--', lw=1.5, alpha=0.75)
    # plt.pcolormesh(xxa, yya, s**2)
    # plt.colorbar()
    # 1/2/3-sigma cumulative-probability contours
    h1 = plt.contour(xxa, yya, pcum, scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), colors='cyan', linewidths=2, alpha=1.0)
    # h1 = plt.contourf(xxa, yya, pcum, scipy.special.erf(np.array([0,1,2,3])/np.sqrt(2)), cmap=plt.cm.get_cmap("Blues"))
    # plt.gca().set_axis_bgcolor('black')
    plt.gca().set_axis_bgcolor('white')
    plt.grid(alpha=0.5)
    plt.ylim(-win-res/2, win+res/2)
    plt.xlim(-win-res/2, win+res/2)
    imax = np.argmax(snr.ravel())
    (xmax, ymax) = (xxa.ravel()[imax], yya.ravel()[imax])
    plt.plot(xmax, ymax, 'y+', ms=11, mew=2)
    plt.text(-0.99*win-res/2, 0.98*win+res/2, '[%.1f, %.1f] $\pm$ %.1f"' % (xmax, ymax, e68), va='top', ha='left', color='black')
    # plt.text(win, win, '[%.2f $\pm$ 2 mV]' % v.ravel()[imax], va='top', ha='right', color='black')
    plt.text(.99*win+res/2, .98*win+res/2, '[%.1f $\pm$ %.1f mV]' % (vmean, vstd), va='top', ha='right', color='black')
    # if len(scans) == 1:
    #     plt.title("%s: %d" % (z.source, scans[0]))
    # else:
    #     plt.title("%s: [%d - %d]" % (z.source, scans[0], scans[-1]))
    plt.title('3C 273')
    plt.xlabel('$\Delta$x [arcsec]')
    plt.ylabel('$\Delta$y [arcsec]')
    plt.gca().set_aspect(1.0)
    plt.tight_layout()
| mit |
dvida/UnknownPleasuresGenerator | UnknownPleasuresGenerator.py | 1 | 3075 | import matplotlib.pyplot as plt
import numpy as np
# Generates an "Unknown Pleasures"-style plot: stacked wavy curves with
# random central peaks, drawn white-on-black, with each curve masking the
# ones behind it.

# Number of curves to plot
curves_no = 80

# Curve vertical spacing
curve_v_space = 3

# Maximum number of peaks in the center
max_peaks = 9

# Maximum parabolic peak amplitude
max_parab_peak_amplitude = 0.05

# Maximum pointy peak amplitude
max_point_peak_amplitude = 0.3

# Peak noise
peak_noise = 0.2

# Max peak width
max_peak_width = 12

# Space between the ends and the central wavy part
parab_sides = 55

# Vertical spacing
v_space = 15

# Number of points in time
time_points = 220

# How much to shift the curves up (needed for proper filling the space below curves)
move_up = 500

# Generate X axis numbers
t = np.linspace(-5, 5, time_points)

# Track the global vertical extent of all curves for the final axis limits
y_min = move_up
y_max = 0

for i in range(curves_no):
    # Generate oscillations
    y = 0.1*np.sin(15*t + 10) + 0.1*np.sin(2*t + 5) + 0.1*np.sin(t+1)

    # Generate central part (downward parabola, flattened at both ends)
    parab = - 0.6 * t**2
    parab[0:parab_sides] = parab[parab_sides]
    parab[-parab_sides:] = parab[-parab_sides]

    # Generate central noise, also flattened at the ends
    parab_noise = np.sin(np.random.normal(0, 0.1, time_points) * t)
    parab_noise[0:parab_sides] = parab_noise[parab_sides]
    parab_noise[-parab_sides:] = parab_noise[-parab_sides]

    # Generate peaks in the centre
    peak_sum = np.zeros((time_points))
    for j in range(int(np.random.uniform(max_peaks/2, max_peaks, 1))):
        # Random peak position (kept away from the flat edges) and width
        peak_time = int(np.random.uniform(0+parab_sides*1.2, time_points-parab_sides*1.2, 1))
        peak_width = int(np.random.uniform(max_peak_width/2, max_peak_width, 1))
        peak_t = np.linspace(peak_time - peak_width, peak_time+peak_width, peak_width*2)
        peak_parab = np.zeros((time_points))
        # Peak is an upwardly pointing parabola
        #peak_parab[peak_time - peak_width : peak_time+peak_width] = - np.random.uniform(0, max_parab_peak_amplitude, 1) * (peak_t - peak_time)**2 - np.random.uniform(max_point_peak_amplitude/4, max_point_peak_amplitude, 1) * np.abs(peak_t - peak_time)
        peak_parab[peak_time - peak_width : peak_time+peak_width] = - np.random.uniform(0, max_parab_peak_amplitude, 1) * (peak_t - peak_time)**2 - np.random.exponential(max_point_peak_amplitude, 1) * np.abs(peak_t - peak_time)
        #peak_parab[peak_time - peak_width : peak_time+peak_width] = -np.abs(peak_t - peak_time) * 2
        # Shift the peak so its minimum sits at zero
        min_peak = min(peak_parab)
        peak_parab[peak_time - peak_width : peak_time+peak_width] -= min_peak
        # Add noise to peak
        peak_parab[peak_time - peak_width : peak_time+peak_width] += np.random.normal(0, peak_noise, peak_width*2)
        peak_sum += peak_parab

    # Make random noise
    s = np.random.normal(0,0.1,time_points)

    # Combine all components and offset this curve below the previous one
    y = y + s + parab + parab_noise - i*curve_v_space + peak_sum + move_up

    # Plot the curve in the right order
    plt.plot(t, y, c = 'w', lw = 1.6, zorder = i)
    ax = plt.gca()
    # Fill with black below the curve
    ax.fill_between(t, y, interpolate=True, color='black', zorder = i)

    # Track max and min values
    if min(y) < y_min:
        y_min = min(y)
    if max(y) > y_max:
        y_max = max(y)

# Change background to black
ax.patch.set_facecolor('black')

# Set plot limits
plt.ylim((y_min - v_space, y_max + v_space))

# Show final plot
plt.show()
rosswhitfield/mantid | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSBeamCentreFinder.py | 3 | 19739 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=too-few-public-methods
""" Finds the beam centre for SANS"""
import numpy as np
from mantid import AnalysisDataService
from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode, Progress)
from mantid.kernel import (Direction, StringListValidator, Logger)
from mantid.simpleapi import CloneWorkspace, GroupWorkspaces
from sans.algorithm_detail.beamcentrefinder_plotting import can_plot_beamcentrefinder, plot_workspace_quartiles
from sans.algorithm_detail.crop_helper import get_component_name
from sans.algorithm_detail.single_execution import perform_can_subtraction
from sans.algorithm_detail.strip_end_nans_and_infs import strip_end_nans
from sans.common.constants import EMPTY_NAME
from sans.common.enums import (DetectorType, MaskingQuadrant, FindDirectionEnum)
from sans.common.file_information import get_instrument_paths_for_sans_file
from sans.common.general_functions import create_child_algorithm
from sans.common.xml_parsing import get_named_elements_from_ipf_file
from sans.state.Serializer import Serializer
class SANSBeamCentreFinder(DataProcessorAlgorithm):
    """Iteratively searches for the beam centre of a SANS run by reducing
    left/right and top/bottom quartiles and walking the centre position
    until the quartile residuals are minimised."""

    def category(self):
        return 'SANS\\BeamCentreFinder'

    def summary(self):
        return 'Finds the position of the beam centre'

    def PyInit(self):
        # ----------
        # INPUT
        # ----------
        # Workspace which is to be cropped
        self.declareProperty('SANSState', '',
                             doc='A JSON string which fulfills the SANSState contract.')

        self.declareProperty(MatrixWorkspaceProperty("SampleScatterWorkspace", '',
                                                     optional=PropertyMode.Mandatory, direction=Direction.Input),
                             doc='The sample scatter data')

        self.declareProperty(MatrixWorkspaceProperty("SampleScatterMonitorWorkspace", '',
                                                     optional=PropertyMode.Mandatory, direction=Direction.Input),
                             doc='The sample scatter monitor data')

        self.declareProperty(MatrixWorkspaceProperty("SampleTransmissionWorkspace", '',
                                                     optional=PropertyMode.Optional, direction=Direction.Input),
                             doc='The sample transmission data')

        self.declareProperty(MatrixWorkspaceProperty("SampleDirectWorkspace", '',
                                                     optional=PropertyMode.Optional, direction=Direction.Input),
                             doc='The sample direct data')

        self.declareProperty(MatrixWorkspaceProperty("CanScatterWorkspace", '',
                                                     optional=PropertyMode.Optional, direction=Direction.Input),
                             doc='The can scatter data')

        self.declareProperty(MatrixWorkspaceProperty("CanScatterMonitorWorkspace", '',
                                                     optional=PropertyMode.Optional, direction=Direction.Input),
                             doc='The can scatter monitor data')

        self.declareProperty(MatrixWorkspaceProperty("CanTransmissionWorkspace", '',
                                                     optional=PropertyMode.Optional, direction=Direction.Input),
                             doc='The can transmission data')

        self.declareProperty(MatrixWorkspaceProperty("CanDirectWorkspace", '',
                                                     optional=PropertyMode.Optional, direction=Direction.Input),
                             doc='The can direct data')

        # The component, i.e. HAB or LAB
        allowed_detectors = StringListValidator([DetectorType.LAB.value,
                                                 DetectorType.HAB.value])

        self.declareProperty("Component", DetectorType.LAB.value,
                             validator=allowed_detectors, direction=Direction.Input,
                             doc="The component of the instrument which is to be reduced.")

        self.declareProperty("Iterations", 10, direction=Direction.Input, doc="The maximum number of iterations.")

        # NOTE(review): the RMin default (0.6) is larger than the RMax
        # default (0.28), which looks inverted for an inner/outer radius
        # pair -- confirm against the instrument defaults.
        self.declareProperty("RMin", 0.6, direction=Direction.Input, doc="The inner radius of the quartile mask")

        self.declareProperty('RMax', 0.28, direction=Direction.Input, doc="The outer radius of the quartile mask")

        self.declareProperty('Position1Start', 0.0, direction=Direction.Input, doc="The search start position1")

        self.declareProperty('Position2Start', 0.0, direction=Direction.Input, doc="The search start position2")

        self.declareProperty('Tolerance', 0.0001251, direction=Direction.Input, doc="The search tolerance")

        self.declareProperty('Direction', FindDirectionEnum.ALL.value, direction=Direction.Input,
                             doc="The search direction is an enumerable which can be either All, LeftRight or UpDown")

        self.declareProperty('Verbose', False, direction=Direction.Input,
                             doc="Whether to keep workspaces from each iteration in ADS.")

        # ----------
        # Output
        # ----------
        self.declareProperty('Centre1', 0.0, direction=Direction.Output,
                             doc="The centre position found in the first dimension")
        self.declareProperty('Centre2', 0.0, direction=Direction.Output,
                             doc="The centre position found in the second dimension")

    def PyExec(self):
        """Entry point: clone the inputs, set up scaling, run the search
        and publish the resulting centre coordinates."""
        self.logger = Logger("CentreFinder")
        self.logger.notice("Starting centre finder routine...")

        # Clone all inputs so the originals are never modified
        self.sample_scatter = self._get_cloned_workspace("SampleScatterWorkspace")
        self.sample_scatter_monitor = self._get_cloned_workspace("SampleScatterMonitorWorkspace")
        self.sample_transmission = self._get_cloned_workspace("SampleTransmissionWorkspace")
        self.sample_direct = self._get_cloned_workspace("SampleDirectWorkspace")

        self.can_scatter = self._get_cloned_workspace("CanScatterWorkspace")
        self.can_scatter_monitor = self._get_cloned_workspace("CanScatterMonitorWorkspace")
        self.can_transmission = self._get_cloned_workspace("CanTransmissionWorkspace")
        self.can_direct = self._get_cloned_workspace("CanDirectWorkspace")

        self.component = self.getProperty("Component").value
        self.r_min = self.getProperty("RMin").value
        self.r_max = self.getProperty("RMax").value

        self.state = self._get_state()

        # Scale factors used only for logging/reporting positions;
        # LARMOR reports position 1 in different units than the others
        instrument = self.sample_scatter.getInstrument()
        self.scale_1 = 1.0 if instrument.getName() == 'LARMOR' else 1000
        self.scale_2 = 1000

        centre_1_hold, centre_2_hold = self._find_centres()

        self.setProperty("Centre1", centre_1_hold)
        self.setProperty("Centre2", centre_2_hold)

        self.logger.notice("Centre coordinates updated: [{}, {}]".format(centre_1_hold * self.scale_1,
                                                                         centre_2_hold * self.scale_2))

    def _find_centres(self):
        """Walk the centre position, halving and reversing the step each
        time a residual increases, until both steps fall below tolerance
        or the iteration limit is hit.  Returns (centre1, centre2)."""
        progress = self._get_progress()
        verbose = self.getProperty('Verbose').value

        position_lr_step, position_tb_step = self.get_position_steps(self.state)

        centre_lr = self.getProperty("Position1Start").value
        centre_tb = self.getProperty("Position2Start").value

        tolerance = self.getProperty("Tolerance").value

        # Residual history, one entry per iteration
        diff_left_right = []
        diff_top_bottom = []

        # Best centre seen so far
        centre_lr_hold = centre_lr
        centre_tb_hold = centre_tb
        max_iterations = self.getProperty("Iterations").value
        for i in range(0, max_iterations + 1):
            if i != 0:
                centre_lr += position_lr_step
                centre_tb += position_tb_step

            progress.report("Reducing ... Pos1 " + str(centre_lr) + " Pos2 " + str(centre_tb))
            sample_quartiles = self._run_all_reductions(centre_lr, centre_tb)

            output_workspaces = self._publish_to_ADS(sample_quartiles)
            if verbose:
                self._rename_and_group_workspaces(i, output_workspaces)

            lr_results = self._calculate_residuals(sample_quartiles[MaskingQuadrant.LEFT],
                                                   sample_quartiles[MaskingQuadrant.RIGHT])
            tb_results = self._calculate_residuals(sample_quartiles[MaskingQuadrant.TOP],
                                                   sample_quartiles[MaskingQuadrant.BOTTOM])

            self._print_results(lr_results=lr_results, tb_results=tb_results,
                                centre_lr=centre_lr, centre_tb=centre_tb, iteration=i)

            diff_left_right.append(lr_results.total_residual)
            diff_top_bottom.append(tb_results.total_residual)

            if i == 0:
                self._plot_current_result(output_workspaces)
            else:
                # have we stepped across the y-axis that goes through the beam center?
                if diff_left_right[i] > diff_left_right[i - 1]:
                    # yes with stepped across the middle, reverse direction and half the step size
                    position_lr_step = - position_lr_step / 2
                if diff_top_bottom[i] > diff_top_bottom[i - 1]:
                    position_tb_step = - position_tb_step / 2

                # keep this centre if the combined residual improved (or
                # unconditionally in compatibility mode)
                if (diff_left_right[i] + diff_top_bottom[i]) < (diff_left_right[i - 1] + diff_top_bottom[i - 1]) or \
                        self.state.compatibility.use_compatibility_mode:
                    centre_lr_hold = centre_lr
                    centre_tb_hold = centre_tb

                if abs(position_lr_step) < tolerance and abs(position_tb_step) < tolerance:
                    # this is the success criteria, we've close enough to the center
                    self.logger.notice("Converged - check if stuck in local minimum! ")
                    break

        if i == max_iterations:
            self.logger.notice("Out of iterations, new coordinates may not be the best")

        return centre_lr_hold, centre_tb_hold

    def _print_results(self, iteration, centre_lr, centre_tb, lr_results, tb_results):
        """Log one iteration's scaled centre position and average residuals."""
        scaled_lr = self.scale_1 * centre_lr
        scaled_tb = self.scale_2 * centre_tb

        avg_lr_residual = lr_results.total_residual / lr_results.num_points_considered
        avg_tb_residual = tb_results.total_residual / tb_results.num_points_considered

        iter_details = "Itr {:02d}: ({:7.3f}, {:7.3f}) SX={:.3e} SY={:.3e} Points: {:3d} (Unaligned: {:2d})" \
            .format(iteration, scaled_lr, scaled_tb,
                    avg_lr_residual, avg_tb_residual,
                    lr_results.num_points_considered, lr_results.mismatched_points)

        self.logger.notice(iter_details)

    def _plot_current_result(self, output_workspaces):
        """Plot the first iteration's quartiles; abort if they contain NaNs."""
        if can_plot_beamcentrefinder():
            break_loop = self._plot_workspaces(output_workspaces, self.state.data.sample_scatter)
            if break_loop:
                # If workspaces contain NaN values, stop the process.
                raise WorkspaceContainsNanValues()

    def _run_all_reductions(self, centre1, centre2):
        """Reduce the four sample quartiles at (centre1, centre2) and, if a
        can run is present, subtract the matching can quartiles."""
        sample_quartiles = self._run_quartile_reduction(scatter_workspace=self.sample_scatter,
                                                        transmission_workspace=self.sample_transmission,
                                                        direct_workspace=self.sample_direct,
                                                        scatter_monitor_workspace=self.sample_scatter_monitor,
                                                        data_type="Sample",
                                                        centre1=centre1, centre2=centre2)

        if self.can_scatter:
            can_quartiles = self._run_quartile_reduction(scatter_workspace=self.can_scatter,
                                                         transmission_workspace=self.can_transmission,
                                                         direct_workspace=self.can_direct,
                                                         data_type="Can",
                                                         scatter_monitor_workspace=self.can_scatter_monitor,
                                                         centre1=centre1, centre2=centre2)
            for key in sample_quartiles:
                sample_quartiles[key] = perform_can_subtraction(sample_quartiles[key], can_quartiles[key], self)

        return sample_quartiles

    def get_position_steps(self, state):
        """Read the initial step sizes from the instrument parameter file,
        zeroing one direction if a restricted search was requested."""
        instrument_file = get_instrument_paths_for_sans_file(state.data.sample_scatter)
        position_1_step = get_named_elements_from_ipf_file(
            instrument_file[1], ["centre-finder-step-size"], float)['centre-finder-step-size']
        try:
            position_2_step = get_named_elements_from_ipf_file(
                instrument_file[1], ["centre-finder-step-size2"], float)['centre-finder-step-size2']
        # NOTE(review): bare except silently falls back whenever the second
        # step size is absent for any reason -- consider narrowing the
        # exception type once the failure mode of the IPF lookup is known.
        except:
            position_2_step = position_1_step

        find_direction = self.getProperty("Direction").value
        if find_direction == FindDirectionEnum.LEFT_RIGHT.value:
            position_2_step = 0.0
        elif find_direction == FindDirectionEnum.UP_DOWN.value:
            position_1_step = 0.0

        return position_1_step, position_2_step

    def _plot_workspaces(self, output_workspaces, sample_scatter):
        """Plot the quartile workspaces; return True when plotting must be
        aborted because the data contains NaNs."""
        try:
            # Check for NaNs in workspaces
            self._validate_workspaces(output_workspaces)
        except ValueError as e:
            self.logger.notice("Stopping process: {}. Check radius limits.".format(str(e)))
            return True
        else:
            plot_workspace_quartiles(output_workspaces, sample_scatter)
        return False

    @staticmethod
    def _rename_and_group_workspaces(index, output_workspaces):
        """Clone this iteration's workspaces into an Iteration_<index> group
        so the per-iteration history survives in the ADS (Verbose mode)."""
        to_group = []
        for workspace in output_workspaces:
            CloneWorkspace(InputWorkspace=workspace, OutputWorkspace='{}_{}'.format(workspace, index))
            to_group.append('{}_{}'.format(workspace, index))
        GroupWorkspaces(InputWorkspaces=to_group, OutputWorkspace='Iteration_{}'.format(index))

    @staticmethod
    def _publish_to_ADS(sample_quartiles):
        """Publish each quartile workspace to the ADS under its quadrant
        name and return the list of published names."""
        output_workspaces = []
        for key in sample_quartiles:
            assert isinstance(key, MaskingQuadrant)
            output_workspaces.append(key.value)
            AnalysisDataService.addOrReplace(key.value, sample_quartiles[key])

        return output_workspaces

    @staticmethod
    def _validate_workspaces(workspaces):
        """
        This method checks if any of the workspaces to plot contain NaN values.
        :param workspaces: A list of workspace names
        :return: A list of workspaces (used in matplotlib plotting). Raises if NaN values present.
        """
        workspaces = AnalysisDataService.Instance().retrieveWorkspaces(workspaces, unrollGroups=True)
        for ws in workspaces:
            if np.isnan(ws.readY(0)).any():
                # All data can be NaN if bounds are too close together
                # this makes the data unplottable
                raise ValueError("Workspace contains NaN values.")
        return workspaces

    def _get_cloned_workspace(self, workspace_name):
        """Clone the named input workspace, or return '' when it is unset."""
        workspace = self.getProperty(workspace_name).value
        if workspace:
            clone_name = "CloneWorkspace"
            clone_options = {"InputWorkspace": workspace,
                             "OutputWorkspace": EMPTY_NAME}
            clone_alg = create_child_algorithm(self, clone_name, **clone_options)
            clone_alg.execute()
            return clone_alg.getProperty("OutputWorkspace").value
        return ''

    def _run_quartile_reduction(self, scatter_workspace, transmission_workspace, direct_workspace, data_type,
                                scatter_monitor_workspace, centre1, centre2):
        """Run SANSBeamCentreFinderCore once and return the four reduced
        quartiles keyed by MaskingQuadrant (NaN edges stripped)."""
        serialized_state = self.getProperty("SANSState").value
        algorithm_name = "SANSBeamCentreFinderCore"
        alg_options = {"ScatterWorkspace": scatter_workspace,
                       "ScatterMonitorWorkspace": scatter_monitor_workspace,
                       "TransmissionWorkspace": transmission_workspace,
                       "DirectWorkspace": direct_workspace,
                       "Component": self.component,
                       "SANSState": serialized_state,
                       "DataType": data_type,
                       "Centre1": centre1,
                       "Centre2": centre2,
                       "OutputWorkspaceLeft": EMPTY_NAME,
                       "OutputWorkspaceRight": EMPTY_NAME,
                       "OutputWorkspaceTop": EMPTY_NAME,
                       "OutputWorkspaceBottom": EMPTY_NAME,
                       "RMax": self.r_max,
                       "RMin": self.r_min}
        alg = create_child_algorithm(self, algorithm_name, **alg_options)
        alg.execute()
        out_left = strip_end_nans(alg.getProperty("OutputWorkspaceLeft").value, self)
        out_right = strip_end_nans(alg.getProperty("OutputWorkspaceRight").value, self)
        out_top = strip_end_nans(alg.getProperty("OutputWorkspaceTop").value, self)
        out_bottom = strip_end_nans(alg.getProperty("OutputWorkspaceBottom").value, self)
        return {MaskingQuadrant.LEFT: out_left, MaskingQuadrant.RIGHT: out_right, MaskingQuadrant.TOP: out_top,
                MaskingQuadrant.BOTTOM: out_bottom}

    def _get_component(self, workspace):
        """Translate the Component property into the instrument-specific
        component name for the given workspace."""
        component = DetectorType(self.component)
        return get_component_name(workspace, component)

    def _get_state(self):
        """Deserialize the SANSState JSON property."""
        state_json = self.getProperty("SANSState").value
        state = Serializer.from_json(state_json)
        return state

    def _calculate_residuals(self, quartile1, quartile2):
        """Sum of squared differences between two quartile curves, after
        padding each with zeros at q-values only present in the other."""
        yvalsAX = quartile1.readY(0)
        yvalsBX = quartile2.readY(0)
        qvalsAX = quartile1.readX(0)
        qvalsBX = quartile2.readX(0)
        A_vals_dict = dict(zip(qvalsAX, yvalsAX))
        B_vals_dict = dict(zip(qvalsBX, yvalsBX))

        residue = 0.0
        mismatched_points = 0
        # pad each dict with zeros for q-values it is missing
        for key in B_vals_dict:
            if key not in A_vals_dict:
                A_vals_dict[key] = 0.0
                mismatched_points += 1

        for key in A_vals_dict:
            if key not in B_vals_dict:
                B_vals_dict[key] = 0.0
                mismatched_points += 1

        assert len(A_vals_dict) == len(B_vals_dict)
        # NOTE(review): "A_vals_dict and B_vals_dict" evaluates to
        # B_vals_dict (both are truthy); this is only correct because the
        # two dicts share identical key sets at this point.
        for key in A_vals_dict and B_vals_dict:
            residue += pow(A_vals_dict[key] - B_vals_dict[key], 2)

        self.logger.information("Beam Centre Diff: {0}".format(residue))
        return _ResidualsDetails(mismatched_points=mismatched_points, num_points_considered=len(A_vals_dict),
                                 total_residual=residue)

    def _get_progress(self):
        """Progress reporter spanning the whole search."""
        return Progress(self, start=0.0, end=1.0, nreports=10)
class WorkspaceContainsNanValues(Exception):
    """Raised when a reduced quartile workspace contains NaN values and the
    centre-finding process must be stopped."""
    pass
class _ResidualsDetails(object):
def __init__(self, num_points_considered, mismatched_points, total_residual):
self.num_points_considered = num_points_considered
self.mismatched_points = mismatched_points
self.total_residual = total_residual
# Register algorithm with Mantid so it can be created by name at runtime
AlgorithmFactory.subscribe(SANSBeamCentreFinder)
| gpl-3.0 |
francisleunggie/openface | demos/sphere.py | 7 | 8951 | #!/usr/bin/env python2
# projectS and projectC were written by Gabriele Farina.
import time
start = time.time()
import argparse
import cv2
import os
import dlib
import numpy as np
np.set_printoptions(precision=2)
import openface
from matplotlib import cm
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
def getRep(bgrImg):
    """Compute OpenFace embeddings for every face found in a BGR image.

    Uses the module-level ``args``, ``align`` and ``net`` objects.
    Returns a list of embedding vectors (one per detected face), or None
    when face detection yields nothing.
    """
    start = time.time()
    if bgrImg is None:
        raise Exception("Unable to load image/frame")

    # dlib/OpenFace expect RGB ordering, OpenCV delivers BGR
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    if args.verbose:
        print(" + Original size: {}".format(rgbImg.shape))
    if args.verbose:
        print("Loading the image took {} seconds.".format(time.time() - start))

    start = time.time()

    # Get all bounding boxes
    bb = align.getAllFaceBoundingBoxes(rgbImg)

    # NOTE(review): getAllFaceBoundingBoxes appears to return a (possibly
    # empty) sequence here, so the None check may never trigger and an
    # image with no faces falls through to return [] -- confirm intent.
    if bb is None:
        # raise Exception("Unable to find a face: {}".format(imgPath))
        return None
    if args.verbose:
        print("Face detection took {} seconds.".format(time.time() - start))

    start = time.time()

    # Align every detected face to the canonical eye/nose template
    alignedFaces = []
    for box in bb:
        alignedFaces.append(
            align.align(
                args.imgDim,
                rgbImg,
                box,
                landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE))

    if alignedFaces is None:
        raise Exception("Unable to align the frame")
    if args.verbose:
        print("Alignment took {} seconds.".format(time.time() - start))

    start = time.time()

    # One network forward pass per aligned face
    reps = []
    for alignedFace in alignedFaces:
        reps.append(net.forward(alignedFace))
    if args.verbose:
        print("Neural network forward pass took {} seconds.".format(
            time.time() - start))
    return reps
def projectS(rho, theta, z):
    """Project cylindrical coordinates (rho, theta, z) onto the 2D canvas,
    returning a normalised (x, y) point roughly inside [0, 1] x [0, 1]."""
    c = np.cos(theta)
    s = np.sin(theta)
    u = np.sqrt(3.) * rho * (c + s) / 2.
    v = z + 1. + rho * (c - s) / 2.
    return (np.array([u, v]) + np.array([1.5, 0.5])) / 3.
def projectC(x, y, z):
    """Project Cartesian coordinates (x, y, z) onto the 2D canvas by
    converting to cylindrical form and delegating to projectS."""
    rho = np.sqrt(x ** 2 + y ** 2)
    if x == 0 and y == 0:
        theta = 0
    else:
        half_angle = np.arcsin(y / rho)
        theta = half_angle if x >= 0 else np.pi - half_angle
    return projectS(rho, theta, z)
def draw(pts=None, clrs=None, cSz=400):
    """Render a wireframe unit sphere with axes onto a white cSz x cSz
    canvas and overlay the given points with drop-lines to the axes.

    pts: iterable of (x, y, z) unit-sphere points; clrs: matching BGR
    colours; cSz: canvas size in pixels.  Returns the image array.

    Fix: the defaults were mutable lists (pts=[], clrs=[]); replaced with
    None sentinels -- behaviour is unchanged for all callers.
    """
    pts = [] if pts is None else pts
    clrs = [] if clrs is None else clrs

    def toFrame(x):
        # normalised [0, 1] coordinates -> integer pixel coordinates
        return tuple((cSz * x).astype(np.int32))

    cFrame = np.full((cSz, cSz, 3), 255, dtype=np.uint8)

    # latitude circles
    for z in np.linspace(-1, 1, 9):
        r = np.sqrt(1. - z**2)
        last = None
        for theta in np.linspace(0, 2 * np.pi, 50):
            x = toFrame(projectS(r, theta, z))
            if last is not None:
                cv2.line(cFrame, x, last, color=(0, 0, 0))
            last = x

    # longitude-style circles at fixed x
    for x in np.linspace(-1, 1, 9):
        last = None
        for theta in np.linspace(0, 2 * np.pi, 50):
            r = np.sqrt(1. - x**2)
            z = r * np.sin(theta)
            y = r * np.cos(theta)
            # x = toFrame(projectS(r, theta, z))
            p = toFrame(projectC(x, y, z))
            if last is not None:
                cv2.line(cFrame, p, last, color=(0, 0, 0))
            last = p

    # the three coordinate axes
    s = 1
    x = toFrame(projectC(-s, 0, 0))
    y = toFrame(projectC(s, 0, 0))
    cv2.line(cFrame, x, y, color=(0, 0, 0), thickness=4)
    x = toFrame(projectC(0, -s, 0))
    y = toFrame(projectC(0, s, 0))
    cv2.line(cFrame, x, y, color=(0, 0, 0), thickness=4)
    x = toFrame(projectC(0, 0, -s))
    y = toFrame(projectC(0, 0, s))
    cv2.line(cFrame, x, y, color=(0, 0, 0), thickness=4)

    # plot each point with drop-lines to the xy-plane and both axes
    for pt, c in zip(pts, clrs):
        fPt = toFrame(projectC(pt[0], pt[1], pt[2]))
        fPt_noz = toFrame(projectC(pt[0], pt[1], 0))
        fPt_nozy = toFrame(projectC(pt[0], 0, 0))
        fPt_nozx = toFrame(projectC(0, pt[1], 0))
        cv2.line(cFrame, fPt, fPt_noz, color=c, thickness=2)
        cv2.line(cFrame, fPt_noz, fPt_nozy, color=c, thickness=2)
        cv2.line(cFrame, fPt_noz, fPt_nozx, color=c, thickness=2)
        cv2.circle(cFrame, fPt, 5, color=c, thickness=-1)

    return cFrame
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dlibFacePredictor',
type=str,
help="Path to dlib's face predictor.",
default=os.path.join(
dlibModelDir,
"shape_predictor_68_face_landmarks.dat"))
parser.add_argument(
'--networkModel',
type=str,
help="Path to Torch network model.",
default='nn4.small2.3d.v1.t7')
# Download the 3D model from:
# https://storage.cmusatyalab.org/openface-models/nn4.small2.3d.v1.t7
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument(
'--captureDevice',
type=int,
default=0,
help='Capture device. 0 for latop webcam and 1 for usb webcam')
# parser.add_argument('--width', type=int, default=640)
# parser.add_argument('--height', type=int, default=480)
parser.add_argument('--width', type=int, default=1280)
parser.add_argument('--height', type=int, default=800)
parser.add_argument('--scale', type=int, default=0.25)
parser.add_argument('--threshold', type=float, default=0.5)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(
args.networkModel,
imgDim=args.imgDim,
cuda=args.cuda)
# Capture device. Usually 0 will be webcam and 1 will be usb cam.
video_capture = cv2.VideoCapture(args.captureDevice)
video_capture.set(3, args.width)
video_capture.set(4, args.height)
cv2.namedWindow('video', cv2.WINDOW_NORMAL)
class Tracker:
def __init__(self, img, bb, rep):
self.t = dlib.correlation_tracker()
self.t.start_track(img, bb)
self.rep = rep
self.bb = bb
self.pings = 0
def updateRep(self, rep):
self.pings = 0
alpha = 0.9
self.rep = alpha * self.rep + (1. - alpha) * rep
return self.rep
def overlap(self, bb):
p = float(self.bb.intersect(bb).area()) / float(self.bb.area())
return p > 0.3
def ping(self):
self.pings += 1
trackers = []
while True:
ret, frame = video_capture.read()
frame = cv2.flip(frame, 1)
frameSmall = cv2.resize(frame, (int(args.width * args.scale),
int(args.height * args.scale)))
bbs = align.getAllFaceBoundingBoxes(frameSmall)
pts, clrs = [], []
for i, bb in enumerate(bbs):
alignedFace = align.align(96, frameSmall, bb,
landmarkIndices=openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP)
rep = net.forward(alignedFace)
center = bb.center()
centerI = 0.7 * center.x * center.y / \
(args.scale * args.scale * args.width * args.height)
color_np = cm.Set1(centerI)
color_cv = list(np.multiply(color_np[:3], 255))
bl = (int(bb.left() / args.scale), int(bb.bottom() / args.scale))
tr = (int(bb.right() / args.scale), int(bb.top() / args.scale))
cv2.rectangle(frame, bl, tr, color=color_cv, thickness=3)
tracked = False
for i in xrange(len(trackers) - 1, -1, -1):
t = trackers[i]
t.t.update(frame)
if t.overlap(bb):
rep = t.updateRep(rep)
pts.append(rep)
clrs.append(color_cv)
tracked = True
break
if not tracked:
trackers.append(Tracker(frame, bb, rep))
pts.append(rep)
clrs.append(color_cv)
for i in xrange(len(trackers) - 1, -1, -1):
t = trackers[i]
t.ping()
if t.pings > 10:
del trackers[i]
continue
for j in range(i):
if t.t.get_position().intersect(trackers[j].t.get_position()).area() / \
t.t.get_position().area() > 0.4:
del trackers[i]
continue
cSz = 450
sphere = np.copy(frame)
sphere[0:cSz, 0:cSz, :] = draw(pts, clrs, cSz)
alpha = 0.25
beta = 1. - alpha
cv2.putText(sphere, "CMU OpenFace", (50, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 2.,
(0, 0, 0), 1, cv2.cv.CV_AA)
cv2.addWeighted(frame, alpha, sphere, beta, 0.0, frame)
cv2.imshow('video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
| apache-2.0 |
gotomypc/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
syl20bnr/nupic | examples/opf/tools/testDiagnostics.py | 11 | 1762 | import numpy as np
############################################################################
def printMatrix(inputs, spOutput):
''' (i,j)th cell of the diff matrix will have the number of inputs for which the input and output
pattern differ by i bits and the cells activated differ at j places.
Parameters:
--------------------------------------------------------------------
inputs: the input encodings
spOutput: the coincidences activated in response to each input
'''
from pylab import matplotlib as mat
w=len(np.nonzero(inputs[0])[0])
numActive=len(np.nonzero(spOutput[0])[0])
matrix = np.zeros([2*w+1,2*numActive+1])
for x in xrange(len(inputs)):
i = [_hammingDistance(inputs[x], z) for z in inputs[x:]]
j = [_hammingDistance(spOutput[x], a) for a in spOutput[x:]]
for p, q in zip(i,j):
matrix[p,q]+=1
for y in xrange(len(matrix)) :
matrix[y]=[max(10*x, 100) if (x<100 and x>0) else x for x in matrix[y]]
cdict = {'red':((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.7),(1.0,1.0,1.0)),\
'green': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,1.0,1.0)),\
'blue': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,0.5,1.0))}
my_cmap = mat.colors.LinearSegmentedColormap('my_colormap',cdict,256)
pyl=mat.pyplot
pyl.matshow(matrix, cmap = my_cmap)
pyl.colorbar()
pyl.ylabel('Number of bits by which the inputs differ')
pyl.xlabel('Number of cells by which input and output differ')
pyl.title('The difference matrix')
pyl.show()
################################################################################
def _hammingDistance(s1, s2):
"""Hamming distance between two numpy arrays s1 and s2"""
return sum(abs(s1-s2))
| gpl-3.0 |
timtammittee/thorns | thorns/util/dumpdb.py | 1 | 3516 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module implements permanent store for data.
"""
from __future__ import division, print_function, absolute_import
from __future__ import unicode_literals
__author__ = "Marek Rudnicki"
import os
import datetime
import logging
from itertools import izip_longest
import shelve
import collections
import tables
import numpy as np
import pandas as pd
logger = logging.getLogger('thorns')
def get_store(name='store', workdir='work'):
"""Return a quick and dirty shelve based persisten dict-like store.
"""
fname = os.path.join(workdir, name + '.db')
if not os.path.exists(workdir):
os.makedirs(workdir)
store = shelve.open(fname, protocol=-1)
return store
def dumpdb(data, name='dump', workdir='work', kwargs=None):
"""Dump data in order to recall the most up-to-date records later.
Parameters
----------
data : pd.DataFrame
Data that will be appended to the database.
name : str, optional
Base name of the pickle file.
workdir : str, optional
Directory for the data.
kwargs : dict, optional
Additional parameters common for all data (MultiIndex will be
extended).
"""
fname = os.path.join(workdir, name+'.h5')
if not os.path.exists(workdir):
os.makedirs(workdir)
logger.info("Dumping data into {}.".format(fname))
if kwargs is not None:
for k,v in kwargs.items():
data[k] = v
data = data.set_index(kwargs.keys(), append=True)
now = datetime.datetime.now()
key = now.strftime("T%Y%m%d_%H%M%S_%f")
store = pd.io.pytables.HDFStore(fname, 'a')
store[key] = data
store.close()
def loaddb(name='dump', workdir='work', timestamp=False, load_all=False):
"""Recall dumped data discarding duplicated records.
Parameters
----------
name : str, optional
Base of the data filename.
workdir : str, optional
Directory where the data is stored.
timestamp : bool, optional
Add an extra column with timestamps to the index.
load_all : bool, optional
If True, data from all experiments will be loaded from the
dumpdb file. The default is to load only the most recent
data.
Returns
-------
pd.DataFrame
Data without duplicates.
"""
if timestamp:
raise NotImplementedError("Should add extra columnt with timestamps to the index of the output.")
fname = os.path.join(workdir, name+'.h5')
store = pd.io.pytables.HDFStore(fname, 'r')
logger.info("Loading data from {}".format(fname))
if load_all:
xkeys = collections.OrderedDict() # poor-man's ordered set
dbs = []
### Get all tables from the store
for t in sorted(store.keys()):
df = store[t]
# Just want ordered unique values in xkeys (ordered set would
# simplify it: orderedset.update(df.index.names))
for name in df.index.names:
xkeys.setdefault(name)
df = df.reset_index()
dbs.append(df)
db = pd.concat(dbs)
else:
last_key = sorted(store.keys())[-1]
df = store[last_key]
xkeys = df.index.names
db = df.reset_index()
store.close()
### Drop duplicates and set index
db = db.drop_duplicates(
subset=list(xkeys),
keep='last',
)
db = db.set_index(list(xkeys))
return db
| gpl-3.0 |
ajc158/beeworld | gigerommatidiamodelbeeworld.py | 1 | 4121 | import matplotlib.pyplot as plt
import math
import numpy
from mpl_toolkits.mplot3d import Axes3D
def vert(x):
return (0.000734*(x**2))-(0.1042253*x)+4.9
def horr(x):
if x>60:
return (0.00037*(x**2))-(0.04462*x)+3.438
else:
return (0.00069*(x**2))-(0.08333*x)+4.6
def radialDistortion(x,y):
camYaw=0.0/180.0*math.pi
camPitch=0.0/180.0*math.pi
camRoll=0.0/180.0*math.pi
camTrans=numpy.array([[0],[0],[0]])
camScaling = 1
camYM = numpy.matrix([[math.cos(camYaw),0,math.sin(camYaw)],[0,1,0],[-math.sin(camYaw),0,math.cos(camYaw)]])
camPM = numpy.matrix([[1,0,0],[0,math.cos(camPitch),-math.sin(camPitch)],[0,math.sin(camPitch),math.cos(camPitch)]])
camRM = numpy.matrix([[math.cos(camRoll),-math.sin(camRoll),0],[math.sin(camRoll),math.cos(camRoll),0],[0,0,1]])
# undo the camera rotation
# convert x,y into rotations
x = (x- 0)/180*math.pi
y = (y-70)/180*math.pi
ommYM = numpy.matrix([[math.cos(x),0,math.sin(x)],[0,1,0],[-math.sin(x),0,math.cos(x)]])
ommPM = numpy.matrix([[1,0,0],[0,math.cos(y),-math.sin(y)],[0,math.sin(y),math.cos(y)]])
forwardVect = numpy.array([[0],[0],[1]])
vect2 = ommYM*ommPM*forwardVect
#return vect2
vect2 = vect2 + camTrans
vect2 = camYM*camPM*camRM*vect2
if (vect2[2] > 0.05):
vect2 = vect2*camScaling/vect2[2]
else:
return numpy.array([[100000],[100000],[1]])
# normalise
# now translate x-y into pixels to account for distortion
r_c = math.sqrt((vect2[0])**2+(vect2[1])**2)
k_1 = -0.3112
k_2 = 0.10422
k_3 = 0
vect2[0] = vect2[0]*(1+k_1*r_c**2+k_2*r_c**4+k_3*r_c**6)
vect2[1] = vect2[1]*(1+k_1*r_c**2+k_2*r_c**4+k_3*r_c**6)
#vect2[0] = (vect2[0]+1.0)*(576.0/2.0)
#vect2[1] = (vect2[1]+1.0)*(480.0/2.0)
# return
# camera matrix:
f_x = 187.93014
f_y = 170.79432
s = 0
c_x = 185.139
c_y = 111.64043
camMat = numpy.matrix([[f_x,s,c_x],[0,f_y,c_y],[0,0,1]])
# apply
vect2 = camMat*vect2
#vect2[0] += c_x
#vect2[1] += c_y
return vect2
startX=60
startY=70
startPixX=30
startPixY=54
currX=startX
currY=startY
currPixX = startPixX
currPixY = startPixY
itr = 0
xPoints = []
yPoints = []
xPix = []
yPix = []
scale = 1.0
scaleV = 1.0
while currY<140:
if (itr%2)==0:
currX+=(0.5*horr(currY)*scale)
while currX<140:
xPoints.append(currX)
yPoints.append(currY)
xPix.append(currPixX)
yPix.append(currPixY)
currX+=horr(currY)*scale
currPixX+=1
currX=startX
currPixX=startPixX
if (itr%2)==0:
currX+=(0.5*horr(currY)*scale)
while currX>0:
currX-=horr(currY)*scale
currPixX-=1
xPoints.append(currX)
yPoints.append(currY)
xPix.append(currPixX)
yPix.append(currPixY)
currX=startX
currPixX=startPixX
currY+=vert(currX)*scale*scaleV
currPixY+=1
itr+=1
currY = startY
currPixY=startPixY
itr = 0
while currY>0:
if (itr%2)==0:
currX+=(0.5*horr(currY)*scale)
while currX<140:
xPoints.append(currX)
yPoints.append(currY)
xPix.append(currPixX)
yPix.append(currPixY)
currX+=horr(currY)*scale
currPixX+=1
currX=startX
currPixX=startPixX
if (itr%2)==0:
currX+=(0.5*horr(currY)*scale)
while currX>0:
currX-=horr(currY)*scale
currPixX-=1
xPoints.append(currX)
yPoints.append(currY)
xPix.append(currPixX)
yPix.append(currPixY)
currX=startX
currPixX=startPixX
currY-=vert(currX)*scale*scaleV
currPixY-=1
itr+=1
#plt.plot(xPoints,yPoints, 'bx')
#plt.show()
#plt.plot(xPix,yPix, 'bx')
#plt.show()
print min(xPix)
print min(yPix)
print max(xPix)
print max(yPix)
#
f = open('gigerdata.h', 'w')
f.write("#ifndef GIGERDATA_H\n#define GIGERDATA_H\n\nfloat gdata[][4] = { \ \n")
orderedCoords = sorted(zip(xPoints,yPoints,xPix,yPix))
count = 0
mooX = []
mooY = []
mooZ= []
for elem in orderedCoords:
if elem[1]>=0 and elem[1]<=140 and elem[0]>=0 and elem[0]<=140:
if elem[2]>=0 and elem[2]<70 and elem[3]>=0 and elem[3]<100:
f.write("{"+str(elem[0])+","+str(elem[1])+","+str(elem[2])+","+str(elem[3])+"}, \ \n")
count += 1
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.scatter(mooX,mooY,mooZ)# bx
#plt.plot(mooX,mooY,'bx')
#plt.axis([0,576,0,240])
#plt.show()
f.write("{0};\nint gdataLength = {1};\n\n#endif\n".format("}",count))
f.close(); | gpl-3.0 |
bitmonk/pgcli | pgcli/packages/tabulate.py | 28 | 38075 | # -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from decimal import Decimal
from platform import python_version_tuple
from wcwidth import wcswidth
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
import io
def _is_file(f):
return isinstance(f, io.IOBase)
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.4"
MIN_PADDING = 2
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is suppposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebewteenrows ---
# ... (more datarows) ...
# --- linebewteenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
def escape_char(c):
return LATEX_ESCAPE_RULES.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("β", "β", "β€", "β"),
linebelowheader=Line("β", "β", "βͺ", "β‘"),
linebetweenrows=Line("β", "β", "βΌ", "β€"),
linebelow=Line("β", "β", "β§", "β"),
headerrow=DataRow("β", "β", "β"),
datarow=DataRow("β", "β", "β"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is _int_type or type(string) is _long_type or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
if isinstance(string, (bool, Decimal,)):
return _text_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
lwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return ' ' * lwidth + s
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
rwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return s + ' ' * rwidth
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
xwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
lwidth = xwidth // 2
rwidth = 0 if xwidth <= 0 else lwidth + xwidth % 2
return ' ' * lwidth + s + ' ' * rwidth
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return wcswidth(_strip_invisible(s))
else:
return wcswidth(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
maxwidth = max(max(map(width_fn, strings)), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval=""):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.

    Supported tabular data types:

    * list-of-lists or another iterable of iterables
    * list of named tuples (usually used with headers="keys")
    * list of dicts (usually used with headers="keys")
    * list of OrderedDicts (usually used with headers="keys")
    * 2D NumPy arrays
    * NumPy record arrays (usually used with headers="keys")
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")

    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict: values are per-column iterables
            keys = tabular_data.keys()
            rows = list(izip_longest(*tabular_data.values()))  # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = tabular_data.keys()
            vals = tabular_data.values  # values matrix doesn't need to be transposed
            names = tabular_data.index
            # prepend the index label to each row
            rows = [[v]+list(row) for v,row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type,keys))  # headers should be strings
    else:  # it's a usual iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if (headers == "keys" and
            hasattr(tabular_data, "dtype") and
            getattr(tabular_data.dtype, "names")):
            # numpy record array: field names become the headers
            headers = tabular_data.dtype.names
        elif (headers == "keys"
              and len(rows) > 0
              and isinstance(rows[0], tuple)
              and hasattr(rows[0], "_fields")):
            # namedtuple: use the field names as headers
            headers = list(map(_text_type, rows[0]._fields))
        elif (len(rows) > 0
              and isinstance(rows[0], dict)):
            # dict or OrderedDict rows: collect the union of keys,
            # preserving first-seen order
            uniq_keys = set()  # implements hashed lookup
            keys = []  # storage for set
            if headers == "firstrow":
                # the first dict supplies both data for the header row and
                # the initial key order
                firstdict = rows[0] if len(rows) > 0 else {}
                keys.extend(firstdict.keys())
                uniq_keys.update(keys)
                rows = rows[1:]
            for row in rows:
                for k in row.keys():
                    # Save unique items in input order
                    if k not in uniq_keys:
                        keys.append(k)
                        uniq_keys.add(k)
            if headers == 'keys':
                headers = keys
            elif isinstance(headers, dict):
                # a dict of headers for a list of dicts: map each key to its
                # display name, falling back to the key itself
                headers = [headers.get(k, k) for k in keys]
                headers = list(map(_text_type, headers))
            elif headers == "firstrow":
                if len(rows) > 0:
                    headers = [firstdict.get(k, k) for k in keys]
                    headers = list(map(_text_type, headers))
                else:
                    headers = []
            elif headers:
                raise ValueError('headers for a list of dicts is not a dict or a keyword')
            # missing keys yield None, later rendered as missingval
            rows = [[row.get(k) for k in keys] for row in rows]
        elif headers == "keys" and len(rows) > 0:
            # keys are column indices
            headers = list(map(_text_type, range(len(rows[0]))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]
    headers = list(map(_text_type,headers))
    rows = list(map(list,rows))
    # pad with empty headers for initial columns if necessary
    # (fewer headers than columns => headers name the LAST columns,
    # consistent with R and pandas plain-text output)
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""]*(ncols - nhs) + headers
    return rows, headers
def tabulate(tabular_data, headers=[], tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------

    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, an iterable of dictionaries,
    a two-dimensional NumPy array, NumPy record array, or a Pandas'
    dataframe.

    Table headers
    -------------

    To print nice column headers, supply the second argument (`headers`):

      - `headers` can be an explicit list of column headers
      - if `headers="firstrow"`, then the first row of data is used
      - if `headers="keys"`, then dictionary keys or column indices are used

    Otherwise a headerless table is produced.

    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19

    Column alignment
    ----------------

    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).

    Table formats
    -------------

    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.

    `None` values are replaced with a `missingval` string:

    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----

    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
    'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
    currently supported formats.

    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451

    "simple" format is like Pandoc simple_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------

    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+

    "fancy_grid" draws a grid using box-drawing characters:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
    │ strings   │   numbers │
    ╞═══════════╪═══════════╡
    │ spam      │   41.9999 │
    ├───────────┼───────────┤
    │ eggs      │  451      │
    ╘═══════════╧═══════════╛

    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |

    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |

    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========

    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}

    "html" produces HTML markup:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="html"))
    <table>
    <tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
    <tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
    <tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
    </table>

    "latex" produces a tabular environment of LaTeX document markup:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\hline
    \\end{tabular}

    "latex_booktabs" produces a tabular environment of LaTeX document markup
    using the booktabs.sty package:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
    \\begin{tabular}{lr}
    \\toprule
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\bottomrule
    \\end{tabular}
    """
    if tabular_data is None:
        tabular_data = []
    # Coerce every supported input type to (list-of-lists, header list).
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
                           ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        # widths must ignore the invisible escape sequences
        width_fn = _visible_width
    else:
        width_fn = wcswidth

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c]
            for c,ct in zip(cols, coltypes)]

    # align columns: numeric columns use numalign, all others stralign
    aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
    # headers reserve MIN_PADDING extra columns of width
    minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        t_cols = cols or [['']] * len(headers)
        t_aligns = aligns or [stralign] * len(headers)
        # final width is the wider of the header and the aligned data cells
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, t_aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    if not isinstance(tablefmt, TableFormat):
        # look up named format; unknown names silently fall back to "simple"
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
    """Produce a plain-text representation of the table.

    *fmt* is a TableFormat; *headers* and *rows* contain already-aligned
    cell strings; *colwidths*/*colaligns* describe each column.
    Emission order: lineabove, header row, linebelowheader, data rows
    (optionally separated by linebetweenrows), linebelow.
    """
    lines = []
    # with_header_hide lists line names to suppress when headers are shown
    hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    pad = fmt.padding
    headerrow = fmt.headerrow
    # every drawn line must account for the cell padding on both sides
    padded_widths = [(w + 2*pad) for w in colwidths]
    padded_headers = _pad_row(headers, pad)
    padded_rows = [_pad_row(row, pad) for row in rows]
    if fmt.lineabove and "lineabove" not in hidden:
        lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
    if padded_headers:
        lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
        if fmt.linebelowheader and "linebelowheader" not in hidden:
            lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
    if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row in padded_rows[:-1]:
            lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
            lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
        # the last row without a line below
        lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
    else:
        for row in padded_rows:
            lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
    if fmt.linebelow and "linebelow" not in hidden:
        lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
    return "\n".join(lines)
def _main():
    """\
    Usage: tabulate [options] [FILE ...]

    Pretty-print tabular data. See also https://bitbucket.org/astanin/python-tabulate

    FILE                      a filename of the file with tabular data;
                              if "-" or missing, read data from stdin.

    Options:

    -h, --help                show this message
    -1, --header              use the first row of data as a table header
    -s REGEXP, --sep REGEXP   use a custom column separator (default: whitespace)
    -f FMT, --format FMT      set output table format; supported formats:
                              plain, simple, grid, fancy_grid, pipe, orgtbl,
                              rst, mediawiki, html, latex, latex_booktabs, tsv
                              (default: simple)
    """
    import getopt
    import sys
    import textwrap
    usage = textwrap.dedent(_main.__doc__)
    try:
        # NOTE: long options taking an argument need a trailing "=",
        # otherwise getopt treats them as flags and drops the value.
        # "--sep"/"--separator" are both registered so the documented
        # spelling works while the historical one keeps functioning.
        opts, args = getopt.getopt(sys.argv[1:],
                                   "h1f:s:",
                                   ["help", "header", "format=", "sep=", "separator="])
    except getopt.GetoptError as e:
        print(e)
        print(usage)
        sys.exit(2)
    headers = []
    tablefmt = "simple"
    sep = r"\s+"
    for opt, value in opts:
        if opt in ["-1", "--header"]:
            headers = "firstrow"
        elif opt in ["-f", "--format"]:
            if value not in tabulate_formats:
                print("%s is not a supported table format" % value)
                print(usage)
                sys.exit(3)
            tablefmt = value
        elif opt in ["-s", "--sep", "--separator"]:
            sep = value
        elif opt in ["-h", "--help"]:
            print(usage)
            sys.exit(0)
    files = [sys.stdin] if not args else args
    for f in files:
        if f == "-":
            f = sys.stdin
        if _is_file(f):
            _pprint_file(f, headers=headers, tablefmt=tablefmt, sep=sep)
        else:
            with open(f) as fobj:
                # BUGFIX: the parsed options were previously dropped here
                # (bare _pprint_file(fobj) raised TypeError on file args).
                _pprint_file(fobj, headers=headers, tablefmt=tablefmt, sep=sep)
def _pprint_file(fobject, headers=[], tablefmt="simple", sep=r"\s+"):
    """Read regex-separated rows from *fobject* and pretty-print them.

    Parameters default to the CLI defaults (no headers, "simple" format,
    whitespace separator) so the function can be called with just a file
    object — previously all three were required, which made the bare
    ``_pprint_file(fobj)`` call in ``_main`` raise TypeError.
    ``headers`` defaults to ``[]`` to mirror ``tabulate``'s own signature;
    it is never mutated here.
    """
    rows = fobject.readlines()
    # split each line on the separator regex, ignoring trailing whitespace
    table = [re.split(sep, r.rstrip()) for r in rows]
    print(tabulate(table, headers, tablefmt))
# Allow running this module as a command-line tool (see _main's usage text).
if __name__ == "__main__":
    _main()
| bsd-3-clause |
SeldonIO/seldon-server | docker/examples/tensorflow_deep_mnist/create_pipeline.py | 2 | 3690 | from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
import tensorflow as tf
from seldon.tensorflow_wrapper import TensorFlowWrapper
from sklearn.pipeline import Pipeline
import seldon.pipeline.util as sutl
import argparse
def weight_variable(shape):
    """Create a weight Variable initialised from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of x with kernel W: stride 1, SAME (zero) padding."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 and SAME padding (halves spatial dims)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def create_pipeline(load=None):
    """Build a two-conv-layer MNIST classifier and wrap it in an sklearn Pipeline.

    If *load* is None, the network is trained from scratch on MNIST
    (downloaded into MNIST_data/); otherwise *load* is treated as a path
    to a TensorFlow checkpoint to restore.  Returns a Pipeline whose single
    step is a TensorFlowWrapper around the trained/restored session.
    """
    # Input: flattened 28x28 grayscale images (batch of unknown size).
    x = tf.placeholder(tf.float32, [None,784])
    # First conv layer: 5x5 kernels, 1 input channel -> 32 feature maps.
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(x, [-1,28,28,1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)  # 28x28 -> 14x14
    # Second conv layer: 32 -> 64 feature maps.
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)  # 14x14 -> 7x7
    # Fully-connected layer on the flattened 7*7*64 feature maps.
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # Dropout keep-probability is a placeholder so it can differ between
    # training (0.5) and evaluation/serving (1.0).
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # Output layer: 10-way softmax over digit classes.
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    # One-hot ground-truth labels.
    y_ = tf.placeholder(tf.float32, [None, 10])
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    init = tf.initialize_all_variables()
    sess = tf.Session()
    if not load:
        # No checkpoint given: train from scratch on MNIST.
        mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
        print 'Training model'
        sess.run(init)
        for i in range(20000):
            batch_xs, batch_ys = mnist.train.next_batch(50)
            if i%100 == 0:
                # Evaluate training accuracy without dropout (keep_prob 1.0).
                train_accuracy = accuracy.eval(session=sess,feed_dict={x:batch_xs, y_: batch_ys, keep_prob: 1.0})
                print("step %d, training accuracy %.3f"%(i, train_accuracy))
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
        print("test accuracy %g"%accuracy.eval(session=sess,feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
    else:
        # Restore weights from the given checkpoint instead of training.
        print 'Loading pre-trained model'
        saver = tf.train.Saver()
        saver.restore(sess,load)
    # print("test accuracy %g"%accuracy.eval(session=sess,feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
    # print(sess.run(accuracy, feed_dict = {x: mnist.test.images, y_:mnist.test.labels}))
    # Wrap the live session; keep_prob is pinned to 1.0 for inference.
    tfw = TensorFlowWrapper(sess,tf_input=x,tf_output=y_conv,tf_constants=[(keep_prob,1.0)],target="y",target_readable="class",excluded=['class'])
    return Pipeline([('deep_classifier',tfw)])
# CLI entry point: build (or load) the model, then persist the pipeline.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='pipeline_example')
    parser.add_argument('-m','--model', help='model output folder', required=True)
    parser.add_argument('-l','--load',help='Load pretrained model from file')
    args = parser.parse_args()
    # args.load is None when -l is omitted -> create_pipeline trains from scratch.
    p = create_pipeline(args.load)
    pw = sutl.PipelineWrapper()
    pw.save_pipeline(p,args.model)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.